// SPDX-License-Identifier: GPL-2.0
#include <elfutils/libdwfl.h>
#include "perf_regs.h"
#include "../../../util/unwind-libdw.h"
#include "../../../util/perf_regs.h"
#include "../../../util/sample.h"
bool libdw__arch_set_initial_registers(Dwfl_Thread *thread, void *arg)
{
struct unwind_info *ui = arg;
struct regs_dump *user_regs = &ui->sample->user_regs;
Dwarf_Word dwarf_regs[PERF_REG_ARM_MAX];
#define REG(r) ({ \
Dwarf_Word val = 0; \
perf_reg_value(&val, user_regs, PERF_REG_ARM_##r); \
val; \
})
dwarf_regs[0] = REG(R0);
dwarf_regs[1] = REG(R1);
dwarf_regs[2] = REG(R2);
dwarf_regs[3] = REG(R3);
dwarf_regs[4] = REG(R4);
dwarf_regs[5] = REG(R5);
dwarf_regs[6] = REG(R6);
dwarf_regs[7] = REG(R7);
dwarf_regs[8] = REG(R8);
dwarf_regs[9] = REG(R9);
dwarf_regs[10] = REG(R10);
dwarf_regs[11] = REG(FP);
dwarf_regs[12] = REG(IP);
dwarf_regs[13] = REG(SP);
dwarf_regs[14] = REG(LR);
dwarf_regs[15] = REG(PC);
return dwfl_thread_state_registers(thread, 0, PERF_REG_ARM_MAX,
dwarf_regs);
}
| linux-master | tools/perf/arch/arm/util/unwind-libdw.c |
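The REG() macro in the file above relies on a GCC/Clang statement expression: the braced block evaluates to its last expression, so a multi-statement read-with-default collapses into a single rvalue even when perf_reg_value() fails. A minimal standalone sketch of the same pattern, using hypothetical names that are not part of the perf tree:

#include <stdio.h>

/* Toy register file standing in for perf's regs_dump. */
static const unsigned long fake_regs[4] = { 0x10, 0x20, 0x30, 0x40 };

static int fake_reg_value(unsigned long *valp, unsigned int idx)
{
	if (idx >= 4)
		return -1;	/* *valp keeps its caller-set default */
	*valp = fake_regs[idx];
	return 0;
}

/* Same shape as REG(): the whole block is one expression, defaulting to 0. */
#define FAKE_REG(idx) ({ \
	unsigned long val = 0; \
	fake_reg_value(&val, idx); \
	val; \
})

int main(void)
{
	/* prints reg2=0x30 reg9=0: the out-of-range read falls back to 0 */
	printf("reg2=%#lx reg9=%#lx\n", FAKE_REG(2), FAKE_REG(9));
	return 0;
}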
// SPDX-License-Identifier: GPL-2.0-only
/*
* Mapping of DWARF debug register numbers into register names.
*
* Copyright (C) 2010 Will Deacon, ARM Ltd.
*/
#include <stddef.h>
#include <linux/stringify.h>
#include <dwarf-regs.h>
struct pt_regs_dwarfnum {
const char *name;
unsigned int dwarfnum;
};
#define REG_DWARFNUM_NAME(r, num) {.name = r, .dwarfnum = num}
#define GPR_DWARFNUM_NAME(num) \
{.name = __stringify(%r##num), .dwarfnum = num}
#define REG_DWARFNUM_END {.name = NULL, .dwarfnum = 0}
/*
* Reference:
* http://infocenter.arm.com/help/topic/com.arm.doc.ihi0040a/IHI0040A_aadwarf.pdf
*/
static const struct pt_regs_dwarfnum regdwarfnum_table[] = {
GPR_DWARFNUM_NAME(0),
GPR_DWARFNUM_NAME(1),
GPR_DWARFNUM_NAME(2),
GPR_DWARFNUM_NAME(3),
GPR_DWARFNUM_NAME(4),
GPR_DWARFNUM_NAME(5),
GPR_DWARFNUM_NAME(6),
GPR_DWARFNUM_NAME(7),
GPR_DWARFNUM_NAME(8),
GPR_DWARFNUM_NAME(9),
GPR_DWARFNUM_NAME(10),
REG_DWARFNUM_NAME("%fp", 11),
REG_DWARFNUM_NAME("%ip", 12),
REG_DWARFNUM_NAME("%sp", 13),
REG_DWARFNUM_NAME("%lr", 14),
REG_DWARFNUM_NAME("%pc", 15),
REG_DWARFNUM_END,
};
/**
 * get_arch_regstr() - lookup register name from its DWARF register number
 * @n: the DWARF register number
 *
 * get_arch_regstr() returns the name of the register from
 * regdwarfnum_table, looked up by its DWARF register number. If the register
 * is not found in the table, this returns NULL.
 */
const char *get_arch_regstr(unsigned int n)
{
const struct pt_regs_dwarfnum *roff;
for (roff = regdwarfnum_table; roff->name != NULL; roff++)
if (roff->dwarfnum == n)
return roff->name;
return NULL;
}
| linux-master | tools/perf/arch/arm/util/dwarf-regs.c |
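A quick way to exercise a table like the one above is to walk the DWARF numbers and print each name. The harness below is a hypothetical standalone sketch with a trimmed copy of the ARM mapping, not how perf itself drives get_arch_regstr():

#include <stdio.h>
#include <stddef.h>

struct pt_regs_dwarfnum {
	const char *name;
	unsigned int dwarfnum;
};

/* Trimmed copy of the ARM mapping, for illustration only. */
static const struct pt_regs_dwarfnum table[] = {
	{ "%r0", 0 }, { "%r1", 1 }, { "%fp", 11 }, { "%ip", 12 },
	{ "%sp", 13 }, { "%lr", 14 }, { "%pc", 15 }, { NULL, 0 },
};

static const char *lookup(unsigned int n)
{
	const struct pt_regs_dwarfnum *roff;

	/* Linear scan up to the NULL-name sentinel, as in the file above. */
	for (roff = table; roff->name != NULL; roff++)
		if (roff->dwarfnum == n)
			return roff->name;
	return NULL;
}

int main(void)
{
	unsigned int n;

	for (n = 0; n < 16; n++) {
		const char *name = lookup(n);

		printf("dwarf %2u -> %s\n", n, name ? name : "<unmapped>");
	}
	return 0;
}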
// SPDX-License-Identifier: GPL-2.0
#include <errno.h>
#include <libunwind.h>
#include "perf_regs.h"
#include "../../../util/unwind.h"
#include "../../../util/debug.h"
int libunwind__arch_reg_id(int regnum)
{
switch (regnum) {
case UNW_ARM_R0:
return PERF_REG_ARM_R0;
case UNW_ARM_R1:
return PERF_REG_ARM_R1;
case UNW_ARM_R2:
return PERF_REG_ARM_R2;
case UNW_ARM_R3:
return PERF_REG_ARM_R3;
case UNW_ARM_R4:
return PERF_REG_ARM_R4;
case UNW_ARM_R5:
return PERF_REG_ARM_R5;
case UNW_ARM_R6:
return PERF_REG_ARM_R6;
case UNW_ARM_R7:
return PERF_REG_ARM_R7;
case UNW_ARM_R8:
return PERF_REG_ARM_R8;
case UNW_ARM_R9:
return PERF_REG_ARM_R9;
case UNW_ARM_R10:
return PERF_REG_ARM_R10;
case UNW_ARM_R11:
return PERF_REG_ARM_FP;
case UNW_ARM_R12:
return PERF_REG_ARM_IP;
case UNW_ARM_R13:
return PERF_REG_ARM_SP;
case UNW_ARM_R14:
return PERF_REG_ARM_LR;
case UNW_ARM_R15:
return PERF_REG_ARM_PC;
default:
pr_err("unwind: invalid reg id %d\n", regnum);
return -EINVAL;
}
return -EINVAL;
}
| linux-master | tools/perf/arch/arm/util/unwind-libunwind.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright(C) 2015 Linaro Limited. All rights reserved.
* Author: Mathieu Poirier <[email protected]>
*/
#include <api/fs/fs.h>
#include <linux/bits.h>
#include <linux/bitops.h>
#include <linux/compiler.h>
#include <linux/coresight-pmu.h>
#include <linux/kernel.h>
#include <linux/log2.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/zalloc.h>
#include "cs-etm.h"
#include "../../../util/debug.h"
#include "../../../util/record.h"
#include "../../../util/auxtrace.h"
#include "../../../util/cpumap.h"
#include "../../../util/event.h"
#include "../../../util/evlist.h"
#include "../../../util/evsel.h"
#include "../../../util/perf_api_probe.h"
#include "../../../util/evsel_config.h"
#include "../../../util/pmus.h"
#include "../../../util/cs-etm.h"
#include <internal/lib.h> // page_size
#include "../../../util/session.h"
#include <errno.h>
#include <stdlib.h>
#include <sys/stat.h>
struct cs_etm_recording {
struct auxtrace_record itr;
struct perf_pmu *cs_etm_pmu;
struct evlist *evlist;
bool snapshot_mode;
size_t snapshot_size;
};
static const char * const metadata_etmv3_ro[CS_ETM_PRIV_MAX] = {
[CS_ETM_ETMCCER] = "mgmt/etmccer",
[CS_ETM_ETMIDR] = "mgmt/etmidr",
};
static const char * const metadata_etmv4_ro[] = {
[CS_ETMV4_TRCIDR0] = "trcidr/trcidr0",
[CS_ETMV4_TRCIDR1] = "trcidr/trcidr1",
[CS_ETMV4_TRCIDR2] = "trcidr/trcidr2",
[CS_ETMV4_TRCIDR8] = "trcidr/trcidr8",
[CS_ETMV4_TRCAUTHSTATUS] = "mgmt/trcauthstatus",
[CS_ETMV4_TS_SOURCE] = "ts_source",
};
static const char * const metadata_ete_ro[] = {
[CS_ETE_TRCIDR0] = "trcidr/trcidr0",
[CS_ETE_TRCIDR1] = "trcidr/trcidr1",
[CS_ETE_TRCIDR2] = "trcidr/trcidr2",
[CS_ETE_TRCIDR8] = "trcidr/trcidr8",
[CS_ETE_TRCAUTHSTATUS] = "mgmt/trcauthstatus",
[CS_ETE_TRCDEVARCH] = "mgmt/trcdevarch",
[CS_ETE_TS_SOURCE] = "ts_source",
};
static bool cs_etm_is_etmv4(struct auxtrace_record *itr, int cpu);
static bool cs_etm_is_ete(struct auxtrace_record *itr, int cpu);
static int cs_etm_validate_context_id(struct auxtrace_record *itr,
struct evsel *evsel, int cpu)
{
struct cs_etm_recording *ptr =
container_of(itr, struct cs_etm_recording, itr);
struct perf_pmu *cs_etm_pmu = ptr->cs_etm_pmu;
char path[PATH_MAX];
int err;
u32 val;
u64 contextid = evsel->core.attr.config &
(perf_pmu__format_bits(cs_etm_pmu, "contextid") |
perf_pmu__format_bits(cs_etm_pmu, "contextid1") |
perf_pmu__format_bits(cs_etm_pmu, "contextid2"));
if (!contextid)
return 0;
/* Not supported in etmv3 */
if (!cs_etm_is_etmv4(itr, cpu)) {
pr_err("%s: contextid not supported in ETMv3, disable with %s/contextid=0/\n",
CORESIGHT_ETM_PMU_NAME, CORESIGHT_ETM_PMU_NAME);
return -EINVAL;
}
/* Get a handle on TRCIDR2 */
snprintf(path, PATH_MAX, "cpu%d/%s",
cpu, metadata_etmv4_ro[CS_ETMV4_TRCIDR2]);
err = perf_pmu__scan_file(cs_etm_pmu, path, "%x", &val);
/* There was a problem reading the file, bailing out */
if (err != 1) {
pr_err("%s: can't read file %s\n", CORESIGHT_ETM_PMU_NAME,
path);
return err;
}
if (contextid &
perf_pmu__format_bits(cs_etm_pmu, "contextid1")) {
/*
		 * TRCIDR2.CIDSIZE, bits [9:5], indicates whether contextID
* tracing is supported:
* 0b00000 Context ID tracing is not supported.
* 0b00100 Maximum of 32-bit Context ID size.
* All other values are reserved.
*/
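		/*
		 * e.g. for a hypothetical TRCIDR2 read of 0x88, bits [9:5]
		 * are 0b00100, so the check below passes.
		 */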
if (BMVAL(val, 5, 9) != 0x4) {
pr_err("%s: CONTEXTIDR_EL1 isn't supported, disable with %s/contextid1=0/\n",
CORESIGHT_ETM_PMU_NAME, CORESIGHT_ETM_PMU_NAME);
return -EINVAL;
}
}
if (contextid &
perf_pmu__format_bits(cs_etm_pmu, "contextid2")) {
/*
* TRCIDR2.VMIDOPT[30:29] != 0 and
* TRCIDR2.VMIDSIZE[14:10] == 0b00100 (32bit virtual contextid)
* We can't support CONTEXTIDR in VMID if the size of the
* virtual context id is < 32bit.
* Any value of VMIDSIZE >= 4 (i.e, > 32bit) is fine for us.
*/
if (!BMVAL(val, 29, 30) || BMVAL(val, 10, 14) < 4) {
pr_err("%s: CONTEXTIDR_EL2 isn't supported, disable with %s/contextid2=0/\n",
CORESIGHT_ETM_PMU_NAME, CORESIGHT_ETM_PMU_NAME);
return -EINVAL;
}
}
return 0;
}
static int cs_etm_validate_timestamp(struct auxtrace_record *itr,
struct evsel *evsel, int cpu)
{
struct cs_etm_recording *ptr =
container_of(itr, struct cs_etm_recording, itr);
struct perf_pmu *cs_etm_pmu = ptr->cs_etm_pmu;
char path[PATH_MAX];
int err;
u32 val;
if (!(evsel->core.attr.config &
perf_pmu__format_bits(cs_etm_pmu, "timestamp")))
return 0;
if (!cs_etm_is_etmv4(itr, cpu)) {
pr_err("%s: timestamp not supported in ETMv3, disable with %s/timestamp=0/\n",
CORESIGHT_ETM_PMU_NAME, CORESIGHT_ETM_PMU_NAME);
return -EINVAL;
}
	/* Get a handle on TRCIDR0 */
snprintf(path, PATH_MAX, "cpu%d/%s",
cpu, metadata_etmv4_ro[CS_ETMV4_TRCIDR0]);
err = perf_pmu__scan_file(cs_etm_pmu, path, "%x", &val);
/* There was a problem reading the file, bailing out */
if (err != 1) {
pr_err("%s: can't read file %s\n",
CORESIGHT_ETM_PMU_NAME, path);
return err;
}
/*
	 * TRCIDR0.TSSIZE, bits [28:24], indicates whether global timestamping
* is supported:
* 0b00000 Global timestamping is not implemented
* 0b00110 Implementation supports a maximum timestamp of 48bits.
* 0b01000 Implementation supports a maximum timestamp of 64bits.
*/
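	/*
	 * e.g. a hypothetical TRCIDR0 of 0x6000000 carries TSSIZE = 0b00110,
	 * so the masked value below is non-zero and the check passes.
	 */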
val &= GENMASK(28, 24);
	if (!val)
		return -EINVAL;
return 0;
}
/*
* Check whether the requested timestamp and contextid options should be
* available on all requested CPUs and if not, tell the user how to override.
* The kernel will silently disable any unavailable options so a warning here
* first is better. In theory the kernel could still disable the option for
* some other reason so this is best effort only.
*/
static int cs_etm_validate_config(struct auxtrace_record *itr,
struct evsel *evsel)
{
int i, err = -EINVAL;
struct perf_cpu_map *event_cpus = evsel->evlist->core.user_requested_cpus;
struct perf_cpu_map *online_cpus = perf_cpu_map__new(NULL);
/* Set option of each CPU we have */
for (i = 0; i < cpu__max_cpu().cpu; i++) {
struct perf_cpu cpu = { .cpu = i, };
if (!perf_cpu_map__has(event_cpus, cpu) ||
!perf_cpu_map__has(online_cpus, cpu))
continue;
err = cs_etm_validate_context_id(itr, evsel, i);
if (err)
goto out;
err = cs_etm_validate_timestamp(itr, evsel, i);
if (err)
goto out;
}
err = 0;
out:
perf_cpu_map__put(online_cpus);
return err;
}
static int cs_etm_parse_snapshot_options(struct auxtrace_record *itr,
struct record_opts *opts,
const char *str)
{
struct cs_etm_recording *ptr =
container_of(itr, struct cs_etm_recording, itr);
unsigned long long snapshot_size = 0;
char *endptr;
if (str) {
snapshot_size = strtoull(str, &endptr, 0);
if (*endptr || snapshot_size > SIZE_MAX)
return -1;
}
opts->auxtrace_snapshot_mode = true;
opts->auxtrace_snapshot_size = snapshot_size;
ptr->snapshot_size = snapshot_size;
return 0;
}
static int cs_etm_set_sink_attr(struct perf_pmu *pmu,
struct evsel *evsel)
{
char msg[BUFSIZ], path[PATH_MAX], *sink;
struct evsel_config_term *term;
int ret = -EINVAL;
u32 hash;
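	/* A sink hash is already present in the lower 32 bits of config2 */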
if (evsel->core.attr.config2 & GENMASK(31, 0))
return 0;
list_for_each_entry(term, &evsel->config_terms, list) {
if (term->type != EVSEL__CONFIG_TERM_DRV_CFG)
continue;
sink = term->val.str;
snprintf(path, PATH_MAX, "sinks/%s", sink);
ret = perf_pmu__scan_file(pmu, path, "%x", &hash);
if (ret != 1) {
if (errno == ENOENT)
pr_err("Couldn't find sink \"%s\" on event %s\n"
"Missing kernel or device support?\n\n"
"Hint: An appropriate sink will be picked automatically if one isn't specified.\n",
sink, evsel__name(evsel));
else
pr_err("Failed to set sink \"%s\" on event %s with %d (%s)\n",
sink, evsel__name(evsel), errno,
str_error_r(errno, msg, sizeof(msg)));
return ret;
}
evsel->core.attr.config2 |= hash;
return 0;
}
/*
* No sink was provided on the command line - allow the CoreSight
* system to look for a default
*/
return 0;
}
static int cs_etm_recording_options(struct auxtrace_record *itr,
struct evlist *evlist,
struct record_opts *opts)
{
int ret;
struct cs_etm_recording *ptr =
container_of(itr, struct cs_etm_recording, itr);
struct perf_pmu *cs_etm_pmu = ptr->cs_etm_pmu;
struct evsel *evsel, *cs_etm_evsel = NULL;
struct perf_cpu_map *cpus = evlist->core.user_requested_cpus;
bool privileged = perf_event_paranoid_check(-1);
int err = 0;
evlist__for_each_entry(evlist, evsel) {
if (evsel->core.attr.type == cs_etm_pmu->type) {
if (cs_etm_evsel) {
pr_err("There may be only one %s event\n",
CORESIGHT_ETM_PMU_NAME);
return -EINVAL;
}
cs_etm_evsel = evsel;
}
}
	/* no need to continue if no event of interest was found */
if (!cs_etm_evsel)
return 0;
ptr->evlist = evlist;
ptr->snapshot_mode = opts->auxtrace_snapshot_mode;
if (!record_opts__no_switch_events(opts) &&
perf_can_record_switch_events())
opts->record_switch_events = true;
cs_etm_evsel->needs_auxtrace_mmap = true;
opts->full_auxtrace = true;
ret = cs_etm_set_sink_attr(cs_etm_pmu, cs_etm_evsel);
if (ret)
return ret;
if (opts->use_clockid) {
pr_err("Cannot use clockid (-k option) with %s\n",
CORESIGHT_ETM_PMU_NAME);
return -EINVAL;
}
/* we are in snapshot mode */
if (opts->auxtrace_snapshot_mode) {
/*
		 * No size was given to '-S' or '-m,', so go with
* the default
*/
if (!opts->auxtrace_snapshot_size &&
!opts->auxtrace_mmap_pages) {
if (privileged) {
opts->auxtrace_mmap_pages = MiB(4) / page_size;
} else {
opts->auxtrace_mmap_pages =
KiB(128) / page_size;
if (opts->mmap_pages == UINT_MAX)
opts->mmap_pages = KiB(256) / page_size;
}
} else if (!opts->auxtrace_mmap_pages && !privileged &&
opts->mmap_pages == UINT_MAX) {
opts->mmap_pages = KiB(256) / page_size;
}
/*
* '-m,xyz' was specified but no snapshot size, so make the
* snapshot size as big as the auxtrace mmap area.
*/
if (!opts->auxtrace_snapshot_size) {
opts->auxtrace_snapshot_size =
opts->auxtrace_mmap_pages * (size_t)page_size;
}
/*
* -Sxyz was specified but no auxtrace mmap area, so make the
* auxtrace mmap area big enough to fit the requested snapshot
* size.
*/
if (!opts->auxtrace_mmap_pages) {
size_t sz = opts->auxtrace_snapshot_size;
sz = round_up(sz, page_size) / page_size;
opts->auxtrace_mmap_pages = roundup_pow_of_two(sz);
}
/* Snapshot size can't be bigger than the auxtrace area */
if (opts->auxtrace_snapshot_size >
opts->auxtrace_mmap_pages * (size_t)page_size) {
pr_err("Snapshot size %zu must not be greater than AUX area tracing mmap size %zu\n",
opts->auxtrace_snapshot_size,
opts->auxtrace_mmap_pages * (size_t)page_size);
return -EINVAL;
}
/* Something went wrong somewhere - this shouldn't happen */
if (!opts->auxtrace_snapshot_size ||
!opts->auxtrace_mmap_pages) {
pr_err("Failed to calculate default snapshot size and/or AUX area tracing mmap pages\n");
return -EINVAL;
}
}
/* Buffer sizes weren't specified with '-m,xyz' so give some defaults */
if (!opts->auxtrace_mmap_pages) {
if (privileged) {
opts->auxtrace_mmap_pages = MiB(4) / page_size;
} else {
opts->auxtrace_mmap_pages = KiB(128) / page_size;
if (opts->mmap_pages == UINT_MAX)
opts->mmap_pages = KiB(256) / page_size;
}
}
if (opts->auxtrace_snapshot_mode)
pr_debug2("%s snapshot size: %zu\n", CORESIGHT_ETM_PMU_NAME,
opts->auxtrace_snapshot_size);
/*
* To obtain the auxtrace buffer file descriptor, the auxtrace
* event must come first.
*/
evlist__to_front(evlist, cs_etm_evsel);
/*
* get the CPU on the sample - need it to associate trace ID in the
* AUX_OUTPUT_HW_ID event, and the AUX event for per-cpu mmaps.
*/
evsel__set_sample_bit(cs_etm_evsel, CPU);
/*
	 * In the case of per-cpu mmaps, also need the contextID in order to be
	 * notified when a context switch happens.
*/
if (!perf_cpu_map__empty(cpus)) {
evsel__set_config_if_unset(cs_etm_pmu, cs_etm_evsel,
"timestamp", 1);
evsel__set_config_if_unset(cs_etm_pmu, cs_etm_evsel,
"contextid", 1);
}
/* Add dummy event to keep tracking */
err = parse_event(evlist, "dummy:u");
if (err)
goto out;
evsel = evlist__last(evlist);
evlist__set_tracking_event(evlist, evsel);
evsel->core.attr.freq = 0;
evsel->core.attr.sample_period = 1;
/* In per-cpu case, always need the time of mmap events etc */
if (!perf_cpu_map__empty(cpus))
evsel__set_sample_bit(evsel, TIME);
err = cs_etm_validate_config(itr, cs_etm_evsel);
out:
return err;
}
static u64 cs_etm_get_config(struct auxtrace_record *itr)
{
u64 config = 0;
struct cs_etm_recording *ptr =
container_of(itr, struct cs_etm_recording, itr);
struct perf_pmu *cs_etm_pmu = ptr->cs_etm_pmu;
struct evlist *evlist = ptr->evlist;
struct evsel *evsel;
evlist__for_each_entry(evlist, evsel) {
if (evsel->core.attr.type == cs_etm_pmu->type) {
/*
* Variable perf_event_attr::config is assigned to
* ETMv3/PTM. The bit fields have been made to match
			 * the ETMv3.5 ETMCR register specification. See the
* PMU_FORMAT_ATTR() declarations in
* drivers/hwtracing/coresight/coresight-perf.c for
* details.
*/
config = evsel->core.attr.config;
break;
}
}
return config;
}
#ifndef BIT
#define BIT(N) (1UL << (N))
#endif
static u64 cs_etmv4_get_config(struct auxtrace_record *itr)
{
u64 config = 0;
u64 config_opts = 0;
/*
* The perf event variable config bits represent both
* the command line options and register programming
* bits in ETMv3/PTM. For ETMv4 we must remap options
* to real bits
*/
config_opts = cs_etm_get_config(itr);
if (config_opts & BIT(ETM_OPT_CYCACC))
config |= BIT(ETM4_CFG_BIT_CYCACC);
if (config_opts & BIT(ETM_OPT_CTXTID))
config |= BIT(ETM4_CFG_BIT_CTXTID);
if (config_opts & BIT(ETM_OPT_TS))
config |= BIT(ETM4_CFG_BIT_TS);
if (config_opts & BIT(ETM_OPT_RETSTK))
config |= BIT(ETM4_CFG_BIT_RETSTK);
if (config_opts & BIT(ETM_OPT_CTXTID2))
config |= BIT(ETM4_CFG_BIT_VMID) |
BIT(ETM4_CFG_BIT_VMID_OPT);
if (config_opts & BIT(ETM_OPT_BRANCH_BROADCAST))
config |= BIT(ETM4_CFG_BIT_BB);
return config;
}
static size_t
cs_etm_info_priv_size(struct auxtrace_record *itr, struct evlist *evlist)
{
int i;
int etmv3 = 0, etmv4 = 0, ete = 0;
struct perf_cpu_map *event_cpus = evlist->core.user_requested_cpus;
struct perf_cpu_map *online_cpus = perf_cpu_map__new(NULL);
/* cpu map is not empty, we have specific CPUs to work with */
if (!perf_cpu_map__empty(event_cpus)) {
for (i = 0; i < cpu__max_cpu().cpu; i++) {
struct perf_cpu cpu = { .cpu = i, };
if (!perf_cpu_map__has(event_cpus, cpu) ||
!perf_cpu_map__has(online_cpus, cpu))
continue;
if (cs_etm_is_ete(itr, i))
ete++;
else if (cs_etm_is_etmv4(itr, i))
etmv4++;
else
etmv3++;
}
} else {
/* get configuration for all CPUs in the system */
for (i = 0; i < cpu__max_cpu().cpu; i++) {
struct perf_cpu cpu = { .cpu = i, };
if (!perf_cpu_map__has(online_cpus, cpu))
continue;
if (cs_etm_is_ete(itr, i))
ete++;
else if (cs_etm_is_etmv4(itr, i))
etmv4++;
else
etmv3++;
}
}
perf_cpu_map__put(online_cpus);
return (CS_ETM_HEADER_SIZE +
(ete * CS_ETE_PRIV_SIZE) +
(etmv4 * CS_ETMV4_PRIV_SIZE) +
(etmv3 * CS_ETMV3_PRIV_SIZE));
}
static bool cs_etm_is_etmv4(struct auxtrace_record *itr, int cpu)
{
bool ret = false;
char path[PATH_MAX];
int scan;
unsigned int val;
struct cs_etm_recording *ptr =
container_of(itr, struct cs_etm_recording, itr);
struct perf_pmu *cs_etm_pmu = ptr->cs_etm_pmu;
	/* Take any of the RO files for ETMv4 and see if it is present */
snprintf(path, PATH_MAX, "cpu%d/%s",
cpu, metadata_etmv4_ro[CS_ETMV4_TRCIDR0]);
scan = perf_pmu__scan_file(cs_etm_pmu, path, "%x", &val);
/* The file was read successfully, we have a winner */
if (scan == 1)
ret = true;
return ret;
}
static int cs_etm_get_ro(struct perf_pmu *pmu, int cpu, const char *path)
{
char pmu_path[PATH_MAX];
int scan;
unsigned int val = 0;
/* Get RO metadata from sysfs */
snprintf(pmu_path, PATH_MAX, "cpu%d/%s", cpu, path);
scan = perf_pmu__scan_file(pmu, pmu_path, "%x", &val);
if (scan != 1)
pr_err("%s: error reading: %s\n", __func__, pmu_path);
return val;
}
static int cs_etm_get_ro_signed(struct perf_pmu *pmu, int cpu, const char *path)
{
char pmu_path[PATH_MAX];
int scan;
int val = 0;
/* Get RO metadata from sysfs */
snprintf(pmu_path, PATH_MAX, "cpu%d/%s", cpu, path);
scan = perf_pmu__scan_file(pmu, pmu_path, "%d", &val);
if (scan != 1)
pr_err("%s: error reading: %s\n", __func__, pmu_path);
return val;
}
static bool cs_etm_pmu_path_exists(struct perf_pmu *pmu, int cpu, const char *path)
{
char pmu_path[PATH_MAX];
/* Get RO metadata from sysfs */
snprintf(pmu_path, PATH_MAX, "cpu%d/%s", cpu, path);
return perf_pmu__file_exists(pmu, pmu_path);
}
#define TRCDEVARCH_ARCHPART_SHIFT 0
#define TRCDEVARCH_ARCHPART_MASK GENMASK(11, 0)
#define TRCDEVARCH_ARCHPART(x) (((x) & TRCDEVARCH_ARCHPART_MASK) >> TRCDEVARCH_ARCHPART_SHIFT)
#define TRCDEVARCH_ARCHVER_SHIFT 12
#define TRCDEVARCH_ARCHVER_MASK GENMASK(15, 12)
#define TRCDEVARCH_ARCHVER(x) (((x) & TRCDEVARCH_ARCHVER_MASK) >> TRCDEVARCH_ARCHVER_SHIFT)
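/*
 * Worked example: a hypothetical TRCDEVARCH value whose low 16 bits are
 * 0x5a13 decodes to ARCHPART = 0xa13 and ARCHVER = 5, which
 * cs_etm_is_ete() below recognises as an ETE implementation.
 */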
static bool cs_etm_is_ete(struct auxtrace_record *itr, int cpu)
{
struct cs_etm_recording *ptr = container_of(itr, struct cs_etm_recording, itr);
struct perf_pmu *cs_etm_pmu = ptr->cs_etm_pmu;
int trcdevarch;
if (!cs_etm_pmu_path_exists(cs_etm_pmu, cpu, metadata_ete_ro[CS_ETE_TRCDEVARCH]))
return false;
trcdevarch = cs_etm_get_ro(cs_etm_pmu, cpu, metadata_ete_ro[CS_ETE_TRCDEVARCH]);
/*
* ETE if ARCHVER is 5 (ARCHVER is 4 for ETM) and ARCHPART is 0xA13.
* See ETM_DEVARCH_ETE_ARCH in coresight-etm4x.h
*/
return TRCDEVARCH_ARCHVER(trcdevarch) == 5 && TRCDEVARCH_ARCHPART(trcdevarch) == 0xA13;
}
static void cs_etm_save_etmv4_header(__u64 data[], struct auxtrace_record *itr, int cpu)
{
struct cs_etm_recording *ptr = container_of(itr, struct cs_etm_recording, itr);
struct perf_pmu *cs_etm_pmu = ptr->cs_etm_pmu;
/* Get trace configuration register */
data[CS_ETMV4_TRCCONFIGR] = cs_etmv4_get_config(itr);
	/* traceID set to legacy version, in case new perf is running on an older system */
data[CS_ETMV4_TRCTRACEIDR] =
CORESIGHT_LEGACY_CPU_TRACE_ID(cpu) | CORESIGHT_TRACE_ID_UNUSED_FLAG;
/* Get read-only information from sysFS */
data[CS_ETMV4_TRCIDR0] = cs_etm_get_ro(cs_etm_pmu, cpu,
metadata_etmv4_ro[CS_ETMV4_TRCIDR0]);
data[CS_ETMV4_TRCIDR1] = cs_etm_get_ro(cs_etm_pmu, cpu,
metadata_etmv4_ro[CS_ETMV4_TRCIDR1]);
data[CS_ETMV4_TRCIDR2] = cs_etm_get_ro(cs_etm_pmu, cpu,
metadata_etmv4_ro[CS_ETMV4_TRCIDR2]);
data[CS_ETMV4_TRCIDR8] = cs_etm_get_ro(cs_etm_pmu, cpu,
metadata_etmv4_ro[CS_ETMV4_TRCIDR8]);
data[CS_ETMV4_TRCAUTHSTATUS] = cs_etm_get_ro(cs_etm_pmu, cpu,
metadata_etmv4_ro[CS_ETMV4_TRCAUTHSTATUS]);
/* Kernels older than 5.19 may not expose ts_source */
if (cs_etm_pmu_path_exists(cs_etm_pmu, cpu, metadata_etmv4_ro[CS_ETMV4_TS_SOURCE]))
data[CS_ETMV4_TS_SOURCE] = (__u64) cs_etm_get_ro_signed(cs_etm_pmu, cpu,
metadata_etmv4_ro[CS_ETMV4_TS_SOURCE]);
else {
pr_debug3("[%03d] pmu file 'ts_source' not found. Fallback to safe value (-1)\n",
cpu);
data[CS_ETMV4_TS_SOURCE] = (__u64) -1;
}
}
static void cs_etm_save_ete_header(__u64 data[], struct auxtrace_record *itr, int cpu)
{
struct cs_etm_recording *ptr = container_of(itr, struct cs_etm_recording, itr);
struct perf_pmu *cs_etm_pmu = ptr->cs_etm_pmu;
/* Get trace configuration register */
data[CS_ETE_TRCCONFIGR] = cs_etmv4_get_config(itr);
	/* traceID set to legacy version, in case new perf is running on an older system */
data[CS_ETE_TRCTRACEIDR] =
CORESIGHT_LEGACY_CPU_TRACE_ID(cpu) | CORESIGHT_TRACE_ID_UNUSED_FLAG;
/* Get read-only information from sysFS */
data[CS_ETE_TRCIDR0] = cs_etm_get_ro(cs_etm_pmu, cpu,
metadata_ete_ro[CS_ETE_TRCIDR0]);
data[CS_ETE_TRCIDR1] = cs_etm_get_ro(cs_etm_pmu, cpu,
metadata_ete_ro[CS_ETE_TRCIDR1]);
data[CS_ETE_TRCIDR2] = cs_etm_get_ro(cs_etm_pmu, cpu,
metadata_ete_ro[CS_ETE_TRCIDR2]);
data[CS_ETE_TRCIDR8] = cs_etm_get_ro(cs_etm_pmu, cpu,
metadata_ete_ro[CS_ETE_TRCIDR8]);
data[CS_ETE_TRCAUTHSTATUS] = cs_etm_get_ro(cs_etm_pmu, cpu,
metadata_ete_ro[CS_ETE_TRCAUTHSTATUS]);
/* ETE uses the same registers as ETMv4 plus TRCDEVARCH */
data[CS_ETE_TRCDEVARCH] = cs_etm_get_ro(cs_etm_pmu, cpu,
metadata_ete_ro[CS_ETE_TRCDEVARCH]);
/* Kernels older than 5.19 may not expose ts_source */
if (cs_etm_pmu_path_exists(cs_etm_pmu, cpu, metadata_ete_ro[CS_ETE_TS_SOURCE]))
data[CS_ETE_TS_SOURCE] = (__u64) cs_etm_get_ro_signed(cs_etm_pmu, cpu,
metadata_ete_ro[CS_ETE_TS_SOURCE]);
else {
pr_debug3("[%03d] pmu file 'ts_source' not found. Fallback to safe value (-1)\n",
cpu);
data[CS_ETE_TS_SOURCE] = (__u64) -1;
}
}
static void cs_etm_get_metadata(int cpu, u32 *offset,
struct auxtrace_record *itr,
struct perf_record_auxtrace_info *info)
{
u32 increment, nr_trc_params;
u64 magic;
struct cs_etm_recording *ptr =
container_of(itr, struct cs_etm_recording, itr);
struct perf_pmu *cs_etm_pmu = ptr->cs_etm_pmu;
/* first see what kind of tracer this cpu is affined to */
if (cs_etm_is_ete(itr, cpu)) {
magic = __perf_cs_ete_magic;
cs_etm_save_ete_header(&info->priv[*offset], itr, cpu);
/* How much space was used */
increment = CS_ETE_PRIV_MAX;
nr_trc_params = CS_ETE_PRIV_MAX - CS_ETM_COMMON_BLK_MAX_V1;
} else if (cs_etm_is_etmv4(itr, cpu)) {
magic = __perf_cs_etmv4_magic;
cs_etm_save_etmv4_header(&info->priv[*offset], itr, cpu);
/* How much space was used */
increment = CS_ETMV4_PRIV_MAX;
nr_trc_params = CS_ETMV4_PRIV_MAX - CS_ETMV4_TRCCONFIGR;
} else {
magic = __perf_cs_etmv3_magic;
/* Get configuration register */
info->priv[*offset + CS_ETM_ETMCR] = cs_etm_get_config(itr);
		/* traceID set to legacy value, in case new perf is running on an old system */
info->priv[*offset + CS_ETM_ETMTRACEIDR] =
CORESIGHT_LEGACY_CPU_TRACE_ID(cpu) | CORESIGHT_TRACE_ID_UNUSED_FLAG;
/* Get read-only information from sysFS */
info->priv[*offset + CS_ETM_ETMCCER] =
cs_etm_get_ro(cs_etm_pmu, cpu,
metadata_etmv3_ro[CS_ETM_ETMCCER]);
info->priv[*offset + CS_ETM_ETMIDR] =
cs_etm_get_ro(cs_etm_pmu, cpu,
metadata_etmv3_ro[CS_ETM_ETMIDR]);
/* How much space was used */
increment = CS_ETM_PRIV_MAX;
nr_trc_params = CS_ETM_PRIV_MAX - CS_ETM_ETMCR;
}
/* Build generic header portion */
info->priv[*offset + CS_ETM_MAGIC] = magic;
info->priv[*offset + CS_ETM_CPU] = cpu;
info->priv[*offset + CS_ETM_NR_TRC_PARAMS] = nr_trc_params;
/* Where the next CPU entry should start from */
*offset += increment;
}
static int cs_etm_info_fill(struct auxtrace_record *itr,
struct perf_session *session,
struct perf_record_auxtrace_info *info,
size_t priv_size)
{
int i;
u32 offset;
u64 nr_cpu, type;
struct perf_cpu_map *cpu_map;
struct perf_cpu_map *event_cpus = session->evlist->core.user_requested_cpus;
struct perf_cpu_map *online_cpus = perf_cpu_map__new(NULL);
struct cs_etm_recording *ptr =
container_of(itr, struct cs_etm_recording, itr);
struct perf_pmu *cs_etm_pmu = ptr->cs_etm_pmu;
if (priv_size != cs_etm_info_priv_size(itr, session->evlist))
return -EINVAL;
if (!session->evlist->core.nr_mmaps)
return -EINVAL;
/* If the cpu_map is empty all online CPUs are involved */
if (perf_cpu_map__empty(event_cpus)) {
cpu_map = online_cpus;
} else {
/* Make sure all specified CPUs are online */
		for (i = 0; i < cpu__max_cpu().cpu; i++) {
struct perf_cpu cpu = { .cpu = i, };
if (perf_cpu_map__has(event_cpus, cpu) &&
!perf_cpu_map__has(online_cpus, cpu))
return -EINVAL;
}
cpu_map = event_cpus;
}
nr_cpu = perf_cpu_map__nr(cpu_map);
/* Get PMU type as dynamically assigned by the core */
type = cs_etm_pmu->type;
/* First fill out the session header */
info->type = PERF_AUXTRACE_CS_ETM;
info->priv[CS_HEADER_VERSION] = CS_HEADER_CURRENT_VERSION;
info->priv[CS_PMU_TYPE_CPUS] = type << 32;
info->priv[CS_PMU_TYPE_CPUS] |= nr_cpu;
info->priv[CS_ETM_SNAPSHOT] = ptr->snapshot_mode;
offset = CS_ETM_SNAPSHOT + 1;
for (i = 0; i < cpu__max_cpu().cpu && offset < priv_size; i++) {
struct perf_cpu cpu = { .cpu = i, };
if (perf_cpu_map__has(cpu_map, cpu))
cs_etm_get_metadata(i, &offset, itr, info);
}
perf_cpu_map__put(online_cpus);
return 0;
}
static int cs_etm_snapshot_start(struct auxtrace_record *itr)
{
struct cs_etm_recording *ptr =
container_of(itr, struct cs_etm_recording, itr);
struct evsel *evsel;
evlist__for_each_entry(ptr->evlist, evsel) {
if (evsel->core.attr.type == ptr->cs_etm_pmu->type)
return evsel__disable(evsel);
}
return -EINVAL;
}
static int cs_etm_snapshot_finish(struct auxtrace_record *itr)
{
struct cs_etm_recording *ptr =
container_of(itr, struct cs_etm_recording, itr);
struct evsel *evsel;
evlist__for_each_entry(ptr->evlist, evsel) {
if (evsel->core.attr.type == ptr->cs_etm_pmu->type)
return evsel__enable(evsel);
}
return -EINVAL;
}
static u64 cs_etm_reference(struct auxtrace_record *itr __maybe_unused)
{
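	/*
	 * rand() returns at most RAND_MAX (only guaranteed to be >= 32767),
	 * so two calls are combined to populate the full 64-bit reference.
	 */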
return (((u64) rand() << 0) & 0x00000000FFFFFFFFull) |
(((u64) rand() << 32) & 0xFFFFFFFF00000000ull);
}
static void cs_etm_recording_free(struct auxtrace_record *itr)
{
struct cs_etm_recording *ptr =
container_of(itr, struct cs_etm_recording, itr);
free(ptr);
}
struct auxtrace_record *cs_etm_record_init(int *err)
{
struct perf_pmu *cs_etm_pmu;
struct cs_etm_recording *ptr;
cs_etm_pmu = perf_pmus__find(CORESIGHT_ETM_PMU_NAME);
if (!cs_etm_pmu) {
*err = -EINVAL;
goto out;
}
ptr = zalloc(sizeof(struct cs_etm_recording));
if (!ptr) {
*err = -ENOMEM;
goto out;
}
ptr->cs_etm_pmu = cs_etm_pmu;
ptr->itr.pmu = cs_etm_pmu;
ptr->itr.parse_snapshot_options = cs_etm_parse_snapshot_options;
ptr->itr.recording_options = cs_etm_recording_options;
ptr->itr.info_priv_size = cs_etm_info_priv_size;
ptr->itr.info_fill = cs_etm_info_fill;
ptr->itr.snapshot_start = cs_etm_snapshot_start;
ptr->itr.snapshot_finish = cs_etm_snapshot_finish;
ptr->itr.reference = cs_etm_reference;
ptr->itr.free = cs_etm_recording_free;
ptr->itr.read_finish = auxtrace_record__read_finish;
*err = 0;
return &ptr->itr;
out:
return NULL;
}
/*
* Set a default config to enable the user changed config tracking mechanism
* (CFG_CHG and evsel__set_config_if_unset()). If no default is set then user
* changes aren't tracked.
*/
struct perf_event_attr *
cs_etm_get_default_config(struct perf_pmu *pmu __maybe_unused)
{
struct perf_event_attr *attr;
attr = zalloc(sizeof(struct perf_event_attr));
if (!attr)
return NULL;
attr->sample_period = 1;
return attr;
}
| linux-master | tools/perf/arch/arm/util/cs-etm.c |
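The snapshot sizing in cs_etm_recording_options() above reduces to two rounding steps when only '-Sxyz' is given: round the snapshot size up to whole pages, then round the page count up to a power of two for the AUX mmap. A standalone sketch of that arithmetic, with a hypothetical 4 KiB page size and a local stand-in for the kernel's roundup_pow_of_two():

#include <stdio.h>
#include <stddef.h>

/* Hypothetical page size for the example; perf reads the real one at runtime. */
#define EXAMPLE_PAGE_SIZE 4096UL

/* Local stand-in for the kernel's roundup_pow_of_two(). */
static size_t pow2_roundup(size_t n)
{
	size_t p = 1;

	while (p < n)
		p <<= 1;
	return p;
}

int main(void)
{
	size_t snapshot_size = 300 * 1024;	/* e.g. '-S' asked for 300 KiB */
	size_t pages = (snapshot_size + EXAMPLE_PAGE_SIZE - 1) / EXAMPLE_PAGE_SIZE;
	size_t mmap_pages = pow2_roundup(pages);

	/* 307200 bytes -> 75 pages -> 128 mmap pages (512 KiB of AUX area) */
	printf("%zu bytes -> %zu pages -> %zu mmap pages\n",
	       snapshot_size, pages, mmap_pages);
	return 0;
}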
// SPDX-License-Identifier: GPL-2.0
#include "perf_regs.h"
#include "../../../util/perf_regs.h"
const struct sample_reg sample_reg_masks[] = {
SMPL_REG_END
};
uint64_t arch__intr_reg_mask(void)
{
return PERF_REGS_MASK;
}
uint64_t arch__user_reg_mask(void)
{
return PERF_REGS_MASK;
}
| linux-master | tools/perf/arch/arm/util/perf_regs.c |
// SPDX-License-Identifier: GPL-2.0
#include <stdio.h>
#include <string.h>
#include <linux/compiler.h>
#include "debug.h"
#include "tests/tests.h"
#include "util/find-map.c"
#define VECTORS__MAP_NAME "[vectors]"
static int test__vectors_page(struct test_suite *test __maybe_unused, int subtest __maybe_unused)
{
void *start, *end;
if (find_map(&start, &end, VECTORS__MAP_NAME)) {
pr_err("%s not found, is CONFIG_KUSER_HELPERS enabled?\n",
VECTORS__MAP_NAME);
return TEST_FAIL;
}
return TEST_OK;
}
DEFINE_SUITE("Vectors page", vectors_page);
| linux-master | tools/perf/arch/arm/tests/vectors-page.c |
// SPDX-License-Identifier: GPL-2.0
#include <string.h>
#include "tests/tests.h"
#include "arch-tests.h"
struct test_suite *arch_tests[] = {
#ifdef HAVE_DWARF_UNWIND_SUPPORT
&suite__dwarf_unwind,
#endif
&suite__vectors_page,
NULL,
};
| linux-master | tools/perf/arch/arm/tests/arch-tests.c |
// SPDX-License-Identifier: GPL-2.0
#include <string.h>
#include "perf_regs.h"
#include "thread.h"
#include "map.h"
#include "maps.h"
#include "event.h"
#include "debug.h"
#include "tests/tests.h"
#define STACK_SIZE 8192
static int sample_ustack(struct perf_sample *sample,
struct thread *thread, u64 *regs)
{
struct stack_dump *stack = &sample->user_stack;
struct map *map;
unsigned long sp;
u64 stack_size, *buf;
buf = malloc(STACK_SIZE);
if (!buf) {
pr_debug("failed to allocate sample uregs data\n");
return -1;
}
sp = (unsigned long) regs[PERF_REG_ARM_SP];
map = maps__find(thread__maps(thread), (u64)sp);
if (!map) {
pr_debug("failed to get stack map\n");
free(buf);
return -1;
}
stack_size = map__end(map) - sp;
stack_size = stack_size > STACK_SIZE ? STACK_SIZE : stack_size;
memcpy(buf, (void *) sp, stack_size);
stack->data = (char *) buf;
stack->size = stack_size;
return 0;
}
int test__arch_unwind_sample(struct perf_sample *sample,
struct thread *thread)
{
struct regs_dump *regs = &sample->user_regs;
u64 *buf;
buf = calloc(1, sizeof(u64) * PERF_REGS_MAX);
if (!buf) {
pr_debug("failed to allocate sample uregs data\n");
return -1;
}
perf_regs_load(buf);
regs->abi = PERF_SAMPLE_REGS_ABI;
regs->regs = buf;
regs->mask = PERF_REGS_MASK;
return sample_ustack(sample, thread, buf);
}
| linux-master | tools/perf/arch/arm/tests/dwarf-unwind.c |
// SPDX-License-Identifier: GPL-2.0
#include <linux/compiler.h>
#include <linux/zalloc.h>
#include <sys/types.h>
#include <regex.h>
#include <stdlib.h>
struct arm_annotate {
regex_t call_insn,
jump_insn;
};
static struct ins_ops *arm__associate_instruction_ops(struct arch *arch, const char *name)
{
struct arm_annotate *arm = arch->priv;
struct ins_ops *ops;
regmatch_t match[2];
if (!regexec(&arm->call_insn, name, 2, match, 0))
ops = &call_ops;
else if (!regexec(&arm->jump_insn, name, 2, match, 0))
ops = &jump_ops;
else
return NULL;
arch__associate_ins_ops(arch, name, ops);
return ops;
}
static int arm__annotate_init(struct arch *arch, char *cpuid __maybe_unused)
{
struct arm_annotate *arm;
int err;
if (arch->initialized)
return 0;
arm = zalloc(sizeof(*arm));
if (!arm)
return ENOMEM;
#define ARM_CONDS "(cc|cs|eq|ge|gt|hi|le|ls|lt|mi|ne|pl|vc|vs)"
err = regcomp(&arm->call_insn, "^blx?" ARM_CONDS "?$", REG_EXTENDED);
if (err)
goto out_free_arm;
err = regcomp(&arm->jump_insn, "^bx?" ARM_CONDS "?$", REG_EXTENDED);
if (err)
goto out_free_call;
#undef ARM_CONDS
arch->initialized = true;
arch->priv = arm;
arch->associate_instruction_ops = arm__associate_instruction_ops;
arch->objdump.comment_char = ';';
arch->objdump.skip_functions_char = '+';
return 0;
out_free_call:
regfree(&arm->call_insn);
out_free_arm:
free(arm);
return SYMBOL_ANNOTATE_ERRNO__ARCH_INIT_REGEXP;
}
| linux-master | tools/perf/arch/arm/annotate/instructions.c |
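The classification in the file above comes down to two anchored POSIX extended regexes over the mnemonic. A self-contained sketch of the same patterns against a few hypothetical mnemonics (perf wires this up through its arch->associate_instruction_ops hook instead):

#include <regex.h>
#include <stdio.h>

#define ARM_CONDS "(cc|cs|eq|ge|gt|hi|le|ls|lt|mi|ne|pl|vc|vs)"

int main(void)
{
	static const char * const names[] = { "bl", "blxeq", "bne", "mov" };
	regex_t call_insn, jump_insn;
	unsigned int i;

	if (regcomp(&call_insn, "^blx?" ARM_CONDS "?$", REG_EXTENDED) ||
	    regcomp(&jump_insn, "^bx?" ARM_CONDS "?$", REG_EXTENDED))
		return 1;

	for (i = 0; i < 4; i++) {
		/* "bl" and "blxeq" classify as calls, "bne" as a jump */
		if (!regexec(&call_insn, names[i], 0, NULL, 0))
			printf("%-6s -> call\n", names[i]);
		else if (!regexec(&jump_insn, names[i], 0, NULL, 0))
			printf("%-6s -> jump\n", names[i]);
		else
			printf("%-6s -> other\n", names[i]);
	}
	regfree(&call_insn);
	regfree(&jump_insn);
	return 0;
}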
// SPDX-License-Identifier: GPL-2.0
// Copyright (C) 2019 Hangzhou C-SKY Microsystems co.,ltd.
#include <elfutils/libdwfl.h>
#include "perf_regs.h"
#include "../../util/unwind-libdw.h"
#include "../../util/perf_regs.h"
#include "../../util/event.h"
bool libdw__arch_set_initial_registers(Dwfl_Thread *thread, void *arg)
{
struct unwind_info *ui = arg;
struct regs_dump *user_regs = &ui->sample->user_regs;
Dwarf_Word dwarf_regs[PERF_REG_CSKY_MAX];
#define REG(r) ({ \
Dwarf_Word val = 0; \
perf_reg_value(&val, user_regs, PERF_REG_CSKY_##r); \
val; \
})
#if defined(__CSKYABIV2__)
dwarf_regs[0] = REG(A0);
dwarf_regs[1] = REG(A1);
dwarf_regs[2] = REG(A2);
dwarf_regs[3] = REG(A3);
dwarf_regs[4] = REG(REGS0);
dwarf_regs[5] = REG(REGS1);
dwarf_regs[6] = REG(REGS2);
dwarf_regs[7] = REG(REGS3);
dwarf_regs[8] = REG(REGS4);
dwarf_regs[9] = REG(REGS5);
dwarf_regs[10] = REG(REGS6);
dwarf_regs[11] = REG(REGS7);
dwarf_regs[12] = REG(REGS8);
dwarf_regs[13] = REG(REGS9);
dwarf_regs[14] = REG(SP);
dwarf_regs[15] = REG(LR);
dwarf_regs[16] = REG(EXREGS0);
dwarf_regs[17] = REG(EXREGS1);
dwarf_regs[18] = REG(EXREGS2);
dwarf_regs[19] = REG(EXREGS3);
dwarf_regs[20] = REG(EXREGS4);
dwarf_regs[21] = REG(EXREGS5);
dwarf_regs[22] = REG(EXREGS6);
dwarf_regs[23] = REG(EXREGS7);
dwarf_regs[24] = REG(EXREGS8);
dwarf_regs[25] = REG(EXREGS9);
dwarf_regs[26] = REG(EXREGS10);
dwarf_regs[27] = REG(EXREGS11);
dwarf_regs[28] = REG(EXREGS12);
dwarf_regs[29] = REG(EXREGS13);
dwarf_regs[30] = REG(EXREGS14);
dwarf_regs[31] = REG(TLS);
dwarf_regs[32] = REG(PC);
#else
dwarf_regs[0] = REG(SP);
dwarf_regs[1] = REG(REGS9);
dwarf_regs[2] = REG(A0);
dwarf_regs[3] = REG(A1);
dwarf_regs[4] = REG(A2);
dwarf_regs[5] = REG(A3);
dwarf_regs[6] = REG(REGS0);
dwarf_regs[7] = REG(REGS1);
dwarf_regs[8] = REG(REGS2);
dwarf_regs[9] = REG(REGS3);
dwarf_regs[10] = REG(REGS4);
dwarf_regs[11] = REG(REGS5);
dwarf_regs[12] = REG(REGS6);
dwarf_regs[13] = REG(REGS7);
dwarf_regs[14] = REG(REGS8);
dwarf_regs[15] = REG(LR);
#endif
dwfl_thread_state_register_pc(thread, REG(PC));
return dwfl_thread_state_registers(thread, 0, PERF_REG_CSKY_MAX,
dwarf_regs);
}
| linux-master | tools/perf/arch/csky/util/unwind-libdw.c |
// SPDX-License-Identifier: GPL-2.0
// Copyright (C) 2019 Hangzhou C-SKY Microsystems co.,ltd.
// Mapping of DWARF debug register numbers into register names.
#include <stddef.h>
#include <dwarf-regs.h>
#if defined(__CSKYABIV2__)
#define CSKY_MAX_REGS 73
const char *csky_dwarf_regs_table[CSKY_MAX_REGS] = {
/* r0 ~ r8 */
"%a0", "%a1", "%a2", "%a3", "%regs0", "%regs1", "%regs2", "%regs3",
/* r9 ~ r15 */
"%regs4", "%regs5", "%regs6", "%regs7", "%regs8", "%regs9", "%sp",
"%lr",
/* r16 ~ r23 */
"%exregs0", "%exregs1", "%exregs2", "%exregs3", "%exregs4",
"%exregs5", "%exregs6", "%exregs7",
/* r24 ~ r31 */
"%exregs8", "%exregs9", "%exregs10", "%exregs11", "%exregs12",
"%exregs13", "%exregs14", "%tls",
"%pc", NULL, NULL, NULL, "%hi", "%lo", NULL, NULL,
NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
"%epc",
};
#else
#define CSKY_MAX_REGS 57
const char *csky_dwarf_regs_table[CSKY_MAX_REGS] = {
/* r0 ~ r8 */
"%sp", "%regs9", "%a0", "%a1", "%a2", "%a3", "%regs0", "%regs1",
/* r9 ~ r15 */
"%regs2", "%regs3", "%regs4", "%regs5", "%regs6", "%regs7", "%regs8",
"%lr",
NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
"%epc",
};
#endif
const char *get_arch_regstr(unsigned int n)
{
return (n < CSKY_MAX_REGS) ? csky_dwarf_regs_table[n] : NULL;
}
| linux-master | tools/perf/arch/csky/util/dwarf-regs.c |
// SPDX-License-Identifier: GPL-2.0
#include "perf_regs.h"
#include "../../util/perf_regs.h"
const struct sample_reg sample_reg_masks[] = {
SMPL_REG_END
};
uint64_t arch__intr_reg_mask(void)
{
return PERF_REGS_MASK;
}
uint64_t arch__user_reg_mask(void)
{
return PERF_REGS_MASK;
}
| linux-master | tools/perf/arch/csky/util/perf_regs.c |
// SPDX-License-Identifier: GPL-2.0
// Copyright (C) 2019 Hangzhou C-SKY Microsystems co.,ltd.
#include <linux/compiler.h>
static struct ins_ops *csky__associate_ins_ops(struct arch *arch,
const char *name)
{
struct ins_ops *ops = NULL;
/* catch all kind of jumps */
if (!strcmp(name, "bt") ||
!strcmp(name, "bf") ||
!strcmp(name, "bez") ||
!strcmp(name, "bnez") ||
!strcmp(name, "bnezad") ||
!strcmp(name, "bhsz") ||
!strcmp(name, "bhz") ||
!strcmp(name, "blsz") ||
!strcmp(name, "blz") ||
!strcmp(name, "br") ||
!strcmp(name, "jmpi") ||
!strcmp(name, "jmp"))
ops = &jump_ops;
/* catch function call */
if (!strcmp(name, "bsr") ||
!strcmp(name, "jsri") ||
!strcmp(name, "jsr"))
ops = &call_ops;
/* catch function return */
if (!strcmp(name, "rts"))
ops = &ret_ops;
if (ops)
arch__associate_ins_ops(arch, name, ops);
return ops;
}
static int csky__annotate_init(struct arch *arch, char *cpuid __maybe_unused)
{
arch->initialized = true;
arch->objdump.comment_char = '/';
arch->associate_instruction_ops = csky__associate_ins_ops;
return 0;
}
| linux-master | tools/perf/arch/csky/annotate/instructions.c |
// SPDX-License-Identifier: GPL-2.0
#include <linux/compiler.h>
static int arc__annotate_init(struct arch *arch, char *cpuid __maybe_unused)
{
arch->initialized = true;
arch->objdump.comment_char = ';';
return 0;
}
| linux-master | tools/perf/arch/arc/annotate/instructions.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) 2019 Hangzhou C-SKY Microsystems co.,ltd. */
#include <elfutils/libdwfl.h>
#include "perf_regs.h"
#include "../../util/unwind-libdw.h"
#include "../../util/perf_regs.h"
#include "../../util/sample.h"
bool libdw__arch_set_initial_registers(Dwfl_Thread *thread, void *arg)
{
struct unwind_info *ui = arg;
struct regs_dump *user_regs = &ui->sample->user_regs;
Dwarf_Word dwarf_regs[32];
#define REG(r) ({ \
Dwarf_Word val = 0; \
perf_reg_value(&val, user_regs, PERF_REG_RISCV_##r); \
val; \
})
dwarf_regs[0] = 0;
dwarf_regs[1] = REG(RA);
dwarf_regs[2] = REG(SP);
dwarf_regs[3] = REG(GP);
dwarf_regs[4] = REG(TP);
dwarf_regs[5] = REG(T0);
dwarf_regs[6] = REG(T1);
dwarf_regs[7] = REG(T2);
dwarf_regs[8] = REG(S0);
dwarf_regs[9] = REG(S1);
dwarf_regs[10] = REG(A0);
dwarf_regs[11] = REG(A1);
dwarf_regs[12] = REG(A2);
dwarf_regs[13] = REG(A3);
dwarf_regs[14] = REG(A4);
dwarf_regs[15] = REG(A5);
dwarf_regs[16] = REG(A6);
dwarf_regs[17] = REG(A7);
dwarf_regs[18] = REG(S2);
dwarf_regs[19] = REG(S3);
dwarf_regs[20] = REG(S4);
dwarf_regs[21] = REG(S5);
dwarf_regs[22] = REG(S6);
dwarf_regs[23] = REG(S7);
dwarf_regs[24] = REG(S8);
dwarf_regs[25] = REG(S9);
dwarf_regs[26] = REG(S10);
dwarf_regs[27] = REG(S11);
dwarf_regs[28] = REG(T3);
dwarf_regs[29] = REG(T4);
dwarf_regs[30] = REG(T5);
dwarf_regs[31] = REG(T6);
dwfl_thread_state_register_pc(thread, REG(PC));
return dwfl_thread_state_registers(thread, 0, PERF_REG_RISCV_MAX,
dwarf_regs);
}
| linux-master | tools/perf/arch/riscv/util/unwind-libdw.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2019 Hangzhou C-SKY Microsystems co.,ltd.
* Mapping of DWARF debug register numbers into register names.
*/
#include <stddef.h>
#include <errno.h> /* for EINVAL */
#include <string.h> /* for strcmp */
#include <dwarf-regs.h>
struct pt_regs_dwarfnum {
const char *name;
unsigned int dwarfnum;
};
#define REG_DWARFNUM_NAME(r, num) {.name = r, .dwarfnum = num}
#define REG_DWARFNUM_END {.name = NULL, .dwarfnum = 0}
struct pt_regs_dwarfnum riscv_dwarf_regs_table[] = {
REG_DWARFNUM_NAME("%zero", 0),
REG_DWARFNUM_NAME("%ra", 1),
REG_DWARFNUM_NAME("%sp", 2),
REG_DWARFNUM_NAME("%gp", 3),
REG_DWARFNUM_NAME("%tp", 4),
REG_DWARFNUM_NAME("%t0", 5),
REG_DWARFNUM_NAME("%t1", 6),
REG_DWARFNUM_NAME("%t2", 7),
REG_DWARFNUM_NAME("%s0", 8),
REG_DWARFNUM_NAME("%s1", 9),
REG_DWARFNUM_NAME("%a0", 10),
REG_DWARFNUM_NAME("%a1", 11),
REG_DWARFNUM_NAME("%a2", 12),
REG_DWARFNUM_NAME("%a3", 13),
REG_DWARFNUM_NAME("%a4", 14),
REG_DWARFNUM_NAME("%a5", 15),
REG_DWARFNUM_NAME("%a6", 16),
REG_DWARFNUM_NAME("%a7", 17),
REG_DWARFNUM_NAME("%s2", 18),
REG_DWARFNUM_NAME("%s3", 19),
REG_DWARFNUM_NAME("%s4", 20),
REG_DWARFNUM_NAME("%s5", 21),
REG_DWARFNUM_NAME("%s6", 22),
REG_DWARFNUM_NAME("%s7", 23),
REG_DWARFNUM_NAME("%s8", 24),
REG_DWARFNUM_NAME("%s9", 25),
REG_DWARFNUM_NAME("%s10", 26),
REG_DWARFNUM_NAME("%s11", 27),
REG_DWARFNUM_NAME("%t3", 28),
REG_DWARFNUM_NAME("%t4", 29),
REG_DWARFNUM_NAME("%t5", 30),
REG_DWARFNUM_NAME("%t6", 31),
REG_DWARFNUM_END,
};
#define RISCV_MAX_REGS ((sizeof(riscv_dwarf_regs_table) / \
sizeof(riscv_dwarf_regs_table[0])) - 1)
const char *get_arch_regstr(unsigned int n)
{
return (n < RISCV_MAX_REGS) ? riscv_dwarf_regs_table[n].name : NULL;
}
int regs_query_register_offset(const char *name)
{
const struct pt_regs_dwarfnum *roff;
for (roff = riscv_dwarf_regs_table; roff->name; roff++)
if (!strcmp(roff->name, name))
return roff->dwarfnum;
return -EINVAL;
}
| linux-master | tools/perf/arch/riscv/util/dwarf-regs.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Implementation of get_cpuid().
*
* Author: Nikita Shubin <[email protected]>
*/
#include <stdio.h>
#include <stdlib.h>
#include <api/fs/fs.h>
#include <errno.h>
#include "../../util/debug.h"
#include "../../util/header.h"
#define CPUINFO_MVEN "mvendorid"
#define CPUINFO_MARCH "marchid"
#define CPUINFO_MIMP "mimpid"
#define CPUINFO "/proc/cpuinfo"
static char *_get_field(const char *line)
{
char *line2, *nl;
line2 = strrchr(line, ' ');
if (!line2)
return NULL;
line2++;
nl = strrchr(line, '\n');
if (!nl)
return NULL;
return strndup(line2, nl - line2);
}
static char *_get_cpuid(void)
{
char *line = NULL;
char *mvendorid = NULL;
char *marchid = NULL;
char *mimpid = NULL;
char *cpuid = NULL;
	ssize_t read;
	size_t line_sz = 0;
FILE *cpuinfo;
cpuinfo = fopen(CPUINFO, "r");
if (cpuinfo == NULL)
return cpuid;
while ((read = getline(&line, &line_sz, cpuinfo)) != -1) {
if (!strncmp(line, CPUINFO_MVEN, strlen(CPUINFO_MVEN))) {
mvendorid = _get_field(line);
if (!mvendorid)
goto free;
} else if (!strncmp(line, CPUINFO_MARCH, strlen(CPUINFO_MARCH))) {
marchid = _get_field(line);
if (!marchid)
goto free;
} else if (!strncmp(line, CPUINFO_MIMP, strlen(CPUINFO_MIMP))) {
mimpid = _get_field(line);
if (!mimpid)
goto free;
break;
}
}
if (!mvendorid || !marchid || !mimpid)
goto free;
if (asprintf(&cpuid, "%s-%s-%s", mvendorid, marchid, mimpid) < 0)
cpuid = NULL;
free:
	fclose(cpuinfo);
	free(line);
free(mvendorid);
free(marchid);
free(mimpid);
return cpuid;
}
int get_cpuid(char *buffer, size_t sz)
{
char *cpuid = _get_cpuid();
int ret = 0;
	if (!cpuid)
		return -EINVAL;
	if (sz < strlen(cpuid)) {
ret = -EINVAL;
goto free;
}
scnprintf(buffer, sz, "%s", cpuid);
free:
free(cpuid);
return ret;
}
char *
get_cpuid_str(struct perf_pmu *pmu __maybe_unused)
{
return _get_cpuid();
}
| linux-master | tools/perf/arch/riscv/util/header.c |
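_get_field() above keys on the last space and the trailing newline, so a /proc/cpuinfo line such as "mvendorid : 0x489" yields "0x489". A standalone sketch of that parsing on a hypothetical input line:

#include <stdio.h>
#include <string.h>
#include <stdlib.h>

/* Same approach as _get_field(): text after the last space, trimmed at '\n'. */
static char *get_field(const char *line)
{
	const char *val = strrchr(line, ' ');
	const char *nl = strrchr(line, '\n');

	if (!val || !nl)
		return NULL;
	val++;
	return strndup(val, nl - val);
}

int main(void)
{
	char *v = get_field("mvendorid\t: 0x489\n");	/* hypothetical line */

	printf("%s\n", v ? v : "<none>");	/* prints 0x489 */
	free(v);
	return 0;
}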
// SPDX-License-Identifier: GPL-2.0
#include "perf_regs.h"
#include "../../util/perf_regs.h"
const struct sample_reg sample_reg_masks[] = {
SMPL_REG_END
};
uint64_t arch__intr_reg_mask(void)
{
return PERF_REGS_MASK;
}
uint64_t arch__user_reg_mask(void)
{
return PERF_REGS_MASK;
}
| linux-master | tools/perf/arch/riscv/util/perf_regs.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Mapping of DWARF debug register numbers into register names.
*
* Copyright (C) 2010 David S. Miller <[email protected]>
*/
#include <stddef.h>
#include <dwarf-regs.h>
#define SPARC_MAX_REGS 96
const char *sparc_regs_table[SPARC_MAX_REGS] = {
"%g0", "%g1", "%g2", "%g3", "%g4", "%g5", "%g6", "%g7",
"%o0", "%o1", "%o2", "%o3", "%o4", "%o5", "%sp", "%o7",
"%l0", "%l1", "%l2", "%l3", "%l4", "%l5", "%l6", "%l7",
"%i0", "%i1", "%i2", "%i3", "%i4", "%i5", "%fp", "%i7",
"%f0", "%f1", "%f2", "%f3", "%f4", "%f5", "%f6", "%f7",
"%f8", "%f9", "%f10", "%f11", "%f12", "%f13", "%f14", "%f15",
"%f16", "%f17", "%f18", "%f19", "%f20", "%f21", "%f22", "%f23",
"%f24", "%f25", "%f26", "%f27", "%f28", "%f29", "%f30", "%f31",
"%f32", "%f33", "%f34", "%f35", "%f36", "%f37", "%f38", "%f39",
"%f40", "%f41", "%f42", "%f43", "%f44", "%f45", "%f46", "%f47",
"%f48", "%f49", "%f50", "%f51", "%f52", "%f53", "%f54", "%f55",
"%f56", "%f57", "%f58", "%f59", "%f60", "%f61", "%f62", "%f63",
};
/**
 * get_arch_regstr() - lookup register name from its DWARF register number
 * @n: the DWARF register number
 *
 * get_arch_regstr() returns the name of the register from sparc_regs_table,
 * indexed by its DWARF register number. If the register is not found in the
 * table, this returns NULL.
*/
const char *get_arch_regstr(unsigned int n)
{
return (n < SPARC_MAX_REGS) ? sparc_regs_table[n] : NULL;
}
| linux-master | tools/perf/arch/sparc/util/dwarf-regs.c |
// SPDX-License-Identifier: GPL-2.0
static int is_branch_cond(const char *cond)
{
if (cond[0] == '\0')
return 1;
if (cond[0] == 'a' && cond[1] == '\0')
return 1;
if (cond[0] == 'c' &&
(cond[1] == 'c' || cond[1] == 's') &&
cond[2] == '\0')
return 1;
if (cond[0] == 'e' &&
(cond[1] == '\0' ||
(cond[1] == 'q' && cond[2] == '\0')))
return 1;
if (cond[0] == 'g' &&
(cond[1] == '\0' ||
(cond[1] == 't' && cond[2] == '\0') ||
(cond[1] == 'e' && cond[2] == '\0') ||
(cond[1] == 'e' && cond[2] == 'u' && cond[3] == '\0')))
return 1;
if (cond[0] == 'l' &&
(cond[1] == '\0' ||
(cond[1] == 't' && cond[2] == '\0') ||
(cond[1] == 'u' && cond[2] == '\0') ||
(cond[1] == 'e' && cond[2] == '\0') ||
(cond[1] == 'e' && cond[2] == 'u' && cond[3] == '\0')))
return 1;
if (cond[0] == 'n' &&
(cond[1] == '\0' ||
(cond[1] == 'e' && cond[2] == '\0') ||
(cond[1] == 'z' && cond[2] == '\0') ||
(cond[1] == 'e' && cond[2] == 'g' && cond[3] == '\0')))
return 1;
if (cond[0] == 'b' &&
cond[1] == 'p' &&
cond[2] == 'o' &&
cond[3] == 's' &&
cond[4] == '\0')
return 1;
if (cond[0] == 'v' &&
(cond[1] == 'c' || cond[1] == 's') &&
cond[2] == '\0')
return 1;
if (cond[0] == 'b' &&
cond[1] == 'z' &&
cond[2] == '\0')
return 1;
return 0;
}
static int is_branch_reg_cond(const char *cond)
{
if ((cond[0] == 'n' || cond[0] == 'l') &&
cond[1] == 'z' &&
cond[2] == '\0')
return 1;
if (cond[0] == 'z' &&
cond[1] == '\0')
return 1;
if ((cond[0] == 'g' || cond[0] == 'l') &&
cond[1] == 'e' &&
cond[2] == 'z' &&
cond[3] == '\0')
return 1;
if (cond[0] == 'g' &&
cond[1] == 'z' &&
cond[2] == '\0')
return 1;
return 0;
}
static int is_branch_float_cond(const char *cond)
{
if (cond[0] == '\0')
return 1;
if ((cond[0] == 'a' || cond[0] == 'e' ||
cond[0] == 'z' || cond[0] == 'g' ||
cond[0] == 'l' || cond[0] == 'n' ||
cond[0] == 'o' || cond[0] == 'u') &&
cond[1] == '\0')
return 1;
if (((cond[0] == 'g' && cond[1] == 'e') ||
(cond[0] == 'l' && (cond[1] == 'e' ||
cond[1] == 'g')) ||
(cond[0] == 'n' && (cond[1] == 'e' ||
cond[1] == 'z')) ||
(cond[0] == 'u' && (cond[1] == 'e' ||
cond[1] == 'g' ||
cond[1] == 'l'))) &&
cond[2] == '\0')
return 1;
if (cond[0] == 'u' &&
(cond[1] == 'g' || cond[1] == 'l') &&
cond[2] == 'e' &&
cond[3] == '\0')
return 1;
return 0;
}
static struct ins_ops *sparc__associate_instruction_ops(struct arch *arch, const char *name)
{
struct ins_ops *ops = NULL;
if (!strcmp(name, "call") ||
!strcmp(name, "jmp") ||
!strcmp(name, "jmpl")) {
ops = &call_ops;
} else if (!strcmp(name, "ret") ||
!strcmp(name, "retl") ||
!strcmp(name, "return")) {
ops = &ret_ops;
} else if (!strcmp(name, "mov")) {
ops = &mov_ops;
} else {
if (name[0] == 'c' &&
(name[1] == 'w' || name[1] == 'x'))
name += 2;
if (name[0] == 'b') {
const char *cond = name + 1;
if (cond[0] == 'r') {
if (is_branch_reg_cond(cond + 1))
ops = &jump_ops;
} else if (is_branch_cond(cond)) {
ops = &jump_ops;
}
} else if (name[0] == 'f' && name[1] == 'b') {
if (is_branch_float_cond(name + 2))
ops = &jump_ops;
}
}
if (ops)
arch__associate_ins_ops(arch, name, ops);
return ops;
}
static int sparc__annotate_init(struct arch *arch, char *cpuid __maybe_unused)
{
if (!arch->initialized) {
arch->initialized = true;
arch->associate_instruction_ops = sparc__associate_instruction_ops;
arch->objdump.comment_char = '#';
}
return 0;
}
| linux-master | tools/perf/arch/sparc/annotate/instructions.c |
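For instance, a hypothetical mnemonic "fbuge" reaches the name[0] == 'f' && name[1] == 'b' branch above, is_branch_float_cond("uge") matches its final ug/ul-plus-'e' case, and the instruction is associated with jump_ops; likewise "cwbe" first has its "cw" prefix stripped and then matches the plain branch path as "be".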
// SPDX-License-Identifier: GPL-2.0
#include <internal/cpumap.h>
#include "../../../util/cpumap.h"
#include "../../../util/header.h"
#include "../../../util/pmu.h"
#include "../../../util/pmus.h"
#include <api/fs/fs.h>
#include <math.h>
const struct pmu_metrics_table *pmu_metrics_table__find(void)
{
struct perf_pmu *pmu = pmu__find_core_pmu();
if (pmu)
return perf_pmu__find_metrics_table(pmu);
return NULL;
}
const struct pmu_events_table *pmu_events_table__find(void)
{
struct perf_pmu *pmu = pmu__find_core_pmu();
if (pmu)
return perf_pmu__find_events_table(pmu);
return NULL;
}
double perf_pmu__cpu_slots_per_cycle(void)
{
char path[PATH_MAX];
unsigned long long slots = 0;
struct perf_pmu *pmu = pmu__find_core_pmu();
if (pmu) {
perf_pmu__pathname_scnprintf(path, sizeof(path),
pmu->name, "caps/slots");
/*
* The value of slots is not greater than 32 bits, but
* filename__read_int can't read value with 0x prefix,
* so use filename__read_ull instead.
*/
filename__read_ull(path, &slots);
}
return slots ? (double)slots : NAN;
}
| linux-master | tools/perf/arch/arm64/util/pmu.c |
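perf_pmu__cpu_slots_per_cycle() above needs filename__read_ull() because the caps/slots file may carry a 0x prefix, and it reports NAN when no usable value is read. A standalone sketch of that parse-or-NAN contract on a hypothetical sysfs string:

#include <math.h>
#include <stdio.h>
#include <stdlib.h>

/* Parse a sysfs-style value that may carry a 0x prefix; 0 on failure. */
static unsigned long long parse_slots(const char *s)
{
	return strtoull(s, NULL, 0);	/* base 0 accepts both "5" and "0x5" */
}

int main(void)
{
	unsigned long long slots = parse_slots("0x5");	/* hypothetical caps/slots */
	double per_cycle = slots ? (double)slots : NAN;

	printf("slots per cycle: %g\n", per_cycle);	/* prints 5 */
	return 0;
}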
// SPDX-License-Identifier: GPL-2.0
/*
* Arm Statistical Profiling Extensions (SPE) support
* Copyright (c) 2017-2018, Arm Ltd.
*/
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/bitops.h>
#include <linux/log2.h>
#include <linux/zalloc.h>
#include <time.h>
#include "../../../util/cpumap.h"
#include "../../../util/event.h"
#include "../../../util/evsel.h"
#include "../../../util/evsel_config.h"
#include "../../../util/evlist.h"
#include "../../../util/session.h"
#include <internal/lib.h> // page_size
#include "../../../util/pmu.h"
#include "../../../util/debug.h"
#include "../../../util/auxtrace.h"
#include "../../../util/record.h"
#include "../../../util/arm-spe.h"
#include <tools/libc_compat.h> // reallocarray
#define KiB(x) ((x) * 1024)
#define MiB(x) ((x) * 1024 * 1024)
struct arm_spe_recording {
struct auxtrace_record itr;
struct perf_pmu *arm_spe_pmu;
struct evlist *evlist;
int wrapped_cnt;
bool *wrapped;
};
static size_t
arm_spe_info_priv_size(struct auxtrace_record *itr __maybe_unused,
struct evlist *evlist __maybe_unused)
{
return ARM_SPE_AUXTRACE_PRIV_SIZE;
}
static int arm_spe_info_fill(struct auxtrace_record *itr,
struct perf_session *session,
struct perf_record_auxtrace_info *auxtrace_info,
size_t priv_size)
{
struct arm_spe_recording *sper =
container_of(itr, struct arm_spe_recording, itr);
struct perf_pmu *arm_spe_pmu = sper->arm_spe_pmu;
if (priv_size != ARM_SPE_AUXTRACE_PRIV_SIZE)
return -EINVAL;
if (!session->evlist->core.nr_mmaps)
return -EINVAL;
auxtrace_info->type = PERF_AUXTRACE_ARM_SPE;
auxtrace_info->priv[ARM_SPE_PMU_TYPE] = arm_spe_pmu->type;
return 0;
}
static void
arm_spe_snapshot_resolve_auxtrace_defaults(struct record_opts *opts,
bool privileged)
{
/*
* The default snapshot size is the auxtrace mmap size. If neither auxtrace mmap size nor
* snapshot size is specified, then the default is 4MiB for privileged users, 128KiB for
* unprivileged users.
*
* The default auxtrace mmap size is 4MiB/page_size for privileged users, 128KiB for
* unprivileged users. If an unprivileged user does not specify mmap pages, the mmap pages
* will be reduced from the default 512KiB/page_size to 256KiB/page_size, otherwise the
	 * user is likely to get an error as they exceed their mlock limit.
*/
/*
	 * No size was given to '-S' or '-m,', so go with the default
*/
if (!opts->auxtrace_snapshot_size && !opts->auxtrace_mmap_pages) {
if (privileged) {
opts->auxtrace_mmap_pages = MiB(4) / page_size;
} else {
opts->auxtrace_mmap_pages = KiB(128) / page_size;
if (opts->mmap_pages == UINT_MAX)
opts->mmap_pages = KiB(256) / page_size;
}
} else if (!opts->auxtrace_mmap_pages && !privileged && opts->mmap_pages == UINT_MAX) {
opts->mmap_pages = KiB(256) / page_size;
}
/*
* '-m,xyz' was specified but no snapshot size, so make the snapshot size as big as the
* auxtrace mmap area.
*/
if (!opts->auxtrace_snapshot_size)
opts->auxtrace_snapshot_size = opts->auxtrace_mmap_pages * (size_t)page_size;
/*
* '-Sxyz' was specified but no auxtrace mmap area, so make the auxtrace mmap area big
* enough to fit the requested snapshot size.
*/
if (!opts->auxtrace_mmap_pages) {
size_t sz = opts->auxtrace_snapshot_size;
sz = round_up(sz, page_size) / page_size;
opts->auxtrace_mmap_pages = roundup_pow_of_two(sz);
}
}
static int arm_spe_recording_options(struct auxtrace_record *itr,
struct evlist *evlist,
struct record_opts *opts)
{
struct arm_spe_recording *sper =
container_of(itr, struct arm_spe_recording, itr);
struct perf_pmu *arm_spe_pmu = sper->arm_spe_pmu;
struct evsel *evsel, *arm_spe_evsel = NULL;
struct perf_cpu_map *cpus = evlist->core.user_requested_cpus;
bool privileged = perf_event_paranoid_check(-1);
struct evsel *tracking_evsel;
int err;
u64 bit;
sper->evlist = evlist;
evlist__for_each_entry(evlist, evsel) {
if (evsel->core.attr.type == arm_spe_pmu->type) {
if (arm_spe_evsel) {
pr_err("There may be only one " ARM_SPE_PMU_NAME "x event\n");
return -EINVAL;
}
evsel->core.attr.freq = 0;
evsel->core.attr.sample_period = arm_spe_pmu->default_config->sample_period;
evsel->needs_auxtrace_mmap = true;
arm_spe_evsel = evsel;
opts->full_auxtrace = true;
}
}
if (!opts->full_auxtrace)
return 0;
/*
* we are in snapshot mode.
*/
if (opts->auxtrace_snapshot_mode) {
/*
* Command arguments '-Sxyz' and/or '-m,xyz' are missing, so fill those in with
* default values.
*/
if (!opts->auxtrace_snapshot_size || !opts->auxtrace_mmap_pages)
arm_spe_snapshot_resolve_auxtrace_defaults(opts, privileged);
/*
* Snapshot size can't be bigger than the auxtrace area.
*/
if (opts->auxtrace_snapshot_size > opts->auxtrace_mmap_pages * (size_t)page_size) {
pr_err("Snapshot size %zu must not be greater than AUX area tracing mmap size %zu\n",
opts->auxtrace_snapshot_size,
opts->auxtrace_mmap_pages * (size_t)page_size);
return -EINVAL;
}
/*
* Something went wrong somewhere - this shouldn't happen.
*/
if (!opts->auxtrace_snapshot_size || !opts->auxtrace_mmap_pages) {
pr_err("Failed to calculate default snapshot size and/or AUX area tracing mmap pages\n");
return -EINVAL;
}
}
/* We are in full trace mode but '-m,xyz' wasn't specified */
if (!opts->auxtrace_mmap_pages) {
if (privileged) {
opts->auxtrace_mmap_pages = MiB(4) / page_size;
} else {
opts->auxtrace_mmap_pages = KiB(128) / page_size;
if (opts->mmap_pages == UINT_MAX)
opts->mmap_pages = KiB(256) / page_size;
}
}
/* Validate auxtrace_mmap_pages */
if (opts->auxtrace_mmap_pages) {
size_t sz = opts->auxtrace_mmap_pages * (size_t)page_size;
size_t min_sz = KiB(8);
if (sz < min_sz || !is_power_of_2(sz)) {
pr_err("Invalid mmap size for ARM SPE: must be at least %zuKiB and a power of 2\n",
min_sz / 1024);
return -EINVAL;
}
}
if (opts->auxtrace_snapshot_mode)
pr_debug2("%sx snapshot size: %zu\n", ARM_SPE_PMU_NAME,
opts->auxtrace_snapshot_size);
/*
* To obtain the auxtrace buffer file descriptor, the auxtrace event
* must come first.
*/
evlist__to_front(evlist, arm_spe_evsel);
/*
* In the case of per-cpu mmaps, sample CPU for AUX event;
* also enable the timestamp tracing for samples correlation.
*/
if (!perf_cpu_map__empty(cpus)) {
evsel__set_sample_bit(arm_spe_evsel, CPU);
evsel__set_config_if_unset(arm_spe_pmu, arm_spe_evsel,
"ts_enable", 1);
}
/*
* Set this only so that perf report knows that SPE generates memory info. It has no effect
* on the opening of the event or the SPE data produced.
*/
evsel__set_sample_bit(arm_spe_evsel, DATA_SRC);
/*
 * The PHYS_ADDR flag does not affect the driver behaviour; it is used to
 * indicate that the resulting output's SPE samples contain physical addresses
* where applicable.
*/
bit = perf_pmu__format_bits(arm_spe_pmu, "pa_enable");
if (arm_spe_evsel->core.attr.config & bit)
evsel__set_sample_bit(arm_spe_evsel, PHYS_ADDR);
/* Add dummy event to keep tracking */
err = parse_event(evlist, "dummy:u");
if (err)
return err;
tracking_evsel = evlist__last(evlist);
evlist__set_tracking_event(evlist, tracking_evsel);
tracking_evsel->core.attr.freq = 0;
tracking_evsel->core.attr.sample_period = 1;
/* In per-cpu case, always need the time of mmap events etc */
if (!perf_cpu_map__empty(cpus)) {
evsel__set_sample_bit(tracking_evsel, TIME);
evsel__set_sample_bit(tracking_evsel, CPU);
/* also track task context switch */
if (!record_opts__no_switch_events(opts))
tracking_evsel->core.attr.context_switch = 1;
}
return 0;
}
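/*
 * Illustrative usage sketch (not part of the original source): the options
 * handled above correspond to a record session such as
 *
 *   perf record -e arm_spe_0/ts_enable=1,pa_enable=1/ -S -m,16M -- <workload>
 *
 * where '-S' requests snapshot mode and '-m,16M' sizes the AUX area, matching
 * the '-Sxyz'/'-m,xyz' notation used in the comments above. The PMU instance
 * name arm_spe_0 is an assumption; it varies by system. A dummy:u tracking
 * event is appended automatically by this function.
 */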
static int arm_spe_parse_snapshot_options(struct auxtrace_record *itr __maybe_unused,
struct record_opts *opts,
const char *str)
{
unsigned long long snapshot_size = 0;
char *endptr;
if (str) {
snapshot_size = strtoull(str, &endptr, 0);
if (*endptr || snapshot_size > SIZE_MAX)
return -1;
}
opts->auxtrace_snapshot_mode = true;
opts->auxtrace_snapshot_size = snapshot_size;
return 0;
}
static int arm_spe_snapshot_start(struct auxtrace_record *itr)
{
struct arm_spe_recording *ptr =
container_of(itr, struct arm_spe_recording, itr);
struct evsel *evsel;
evlist__for_each_entry(ptr->evlist, evsel) {
if (evsel->core.attr.type == ptr->arm_spe_pmu->type)
return evsel__disable(evsel);
}
return -EINVAL;
}
static int arm_spe_snapshot_finish(struct auxtrace_record *itr)
{
struct arm_spe_recording *ptr =
container_of(itr, struct arm_spe_recording, itr);
struct evsel *evsel;
evlist__for_each_entry(ptr->evlist, evsel) {
if (evsel->core.attr.type == ptr->arm_spe_pmu->type)
return evsel__enable(evsel);
}
return -EINVAL;
}
static int arm_spe_alloc_wrapped_array(struct arm_spe_recording *ptr, int idx)
{
bool *wrapped;
int cnt = ptr->wrapped_cnt, new_cnt, i;
/*
* No need to allocate, so return early.
*/
if (idx < cnt)
return 0;
/*
* Make ptr->wrapped as big as idx.
*/
new_cnt = idx + 1;
/*
 * Freed in arm_spe_recording_free().
*/
wrapped = reallocarray(ptr->wrapped, new_cnt, sizeof(bool));
if (!wrapped)
return -ENOMEM;
/*
 * Initialize the newly allocated entries.
*/
for (i = cnt; i < new_cnt; i++)
wrapped[i] = false;
ptr->wrapped_cnt = new_cnt;
ptr->wrapped = wrapped;
return 0;
}
static bool arm_spe_buffer_has_wrapped(unsigned char *buffer,
size_t buffer_size, u64 head)
{
u64 i, watermark;
u64 *buf = (u64 *)buffer;
size_t buf_size = buffer_size;
/*
 * Defensively handle the case where head might be continually increasing - if its value is
 * equal to or greater than the size of the ring buffer, then we can safely determine it has
 * wrapped around. Otherwise, continue to detect if head might have wrapped.
*/
if (head >= buffer_size)
return true;
/*
 * We want to look at the very last 512 bytes (chosen arbitrarily) of the ring buffer.
*/
watermark = buf_size - 512;
/*
 * The value of head is somewhere within the size of the ring buffer. This can mean that there
 * hasn't been enough data to fill the ring buffer yet, or that the trace time was so long that
 * head has numerically wrapped around. To find out, we need to check whether there is data at
 * the very end of the ring buffer. We can do this reliably because mmap'ed pages are zeroed
 * out and there is a fresh mapping with every new session.
*/
/*
 * head is less than 512 bytes from the end of the ring buffer.
*/
if (head > watermark)
watermark = head;
/*
* Speed things up by using 64 bit transactions (see "u64 *buf" above)
*/
watermark /= sizeof(u64);
buf_size /= sizeof(u64);
/*
* If we find trace data at the end of the ring buffer, head has been there and has
* numerically wrapped around at least once.
*/
for (i = watermark; i < buf_size; i++)
if (buf[i])
return true;
return false;
}
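/*
 * Worked example (illustrative only, not part of the original source): with a
 * 64 KiB buffer and head == 1000, watermark starts at 65536 - 512 = 65024
 * bytes, i.e. u64 words 8128..8191 are scanned. Any non-zero word there means
 * the buffer was written all the way to its end, so head must have wrapped at
 * least once.
 */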
static int arm_spe_find_snapshot(struct auxtrace_record *itr, int idx,
struct auxtrace_mmap *mm, unsigned char *data,
u64 *head, u64 *old)
{
int err;
bool wrapped;
struct arm_spe_recording *ptr =
container_of(itr, struct arm_spe_recording, itr);
/*
* Allocate memory to keep track of wrapping if this is the first
* time we deal with this *mm.
*/
if (idx >= ptr->wrapped_cnt) {
err = arm_spe_alloc_wrapped_array(ptr, idx);
if (err)
return err;
}
/*
 * Check to see if *head has wrapped around. If it hasn't, only the
 * amount of data between *head and *old is snapshotted to avoid
 * bloating the perf.data file with zeros. But as soon as *head has
 * wrapped around, the entire size of the AUX ring buffer is taken.
*/
wrapped = ptr->wrapped[idx];
if (!wrapped && arm_spe_buffer_has_wrapped(data, mm->len, *head)) {
wrapped = true;
ptr->wrapped[idx] = true;
}
pr_debug3("%s: mmap index %d old head %zu new head %zu size %zu\n",
__func__, idx, (size_t)*old, (size_t)*head, mm->len);
/*
* No wrap has occurred, we can just use *head and *old.
*/
if (!wrapped)
return 0;
/*
 * *head has wrapped around - adjust *head and *old to pick up the
* entire content of the AUX buffer.
*/
if (*head < mm->len)
	*head += mm->len;
*old = *head - mm->len;
return 0;
}
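/*
 * Worked example (illustrative only, not part of the original source): with
 * mm->len == 16384 and a wrapped *head of 100, *head becomes 16484 and *old
 * becomes 100, so exactly one full buffer's worth of data ending at offset
 * 100 is captured. For *head == 20000, *old simply becomes
 * 20000 - 16384 = 3616.
 */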
static u64 arm_spe_reference(struct auxtrace_record *itr __maybe_unused)
{
struct timespec ts;
clock_gettime(CLOCK_MONOTONIC_RAW, &ts);
return ts.tv_sec ^ ts.tv_nsec;
}
static void arm_spe_recording_free(struct auxtrace_record *itr)
{
struct arm_spe_recording *sper =
container_of(itr, struct arm_spe_recording, itr);
zfree(&sper->wrapped);
free(sper);
}
struct auxtrace_record *arm_spe_recording_init(int *err,
struct perf_pmu *arm_spe_pmu)
{
struct arm_spe_recording *sper;
if (!arm_spe_pmu) {
*err = -ENODEV;
return NULL;
}
sper = zalloc(sizeof(struct arm_spe_recording));
if (!sper) {
*err = -ENOMEM;
return NULL;
}
sper->arm_spe_pmu = arm_spe_pmu;
sper->itr.pmu = arm_spe_pmu;
sper->itr.snapshot_start = arm_spe_snapshot_start;
sper->itr.snapshot_finish = arm_spe_snapshot_finish;
sper->itr.find_snapshot = arm_spe_find_snapshot;
sper->itr.parse_snapshot_options = arm_spe_parse_snapshot_options;
sper->itr.recording_options = arm_spe_recording_options;
sper->itr.info_priv_size = arm_spe_info_priv_size;
sper->itr.info_fill = arm_spe_info_fill;
sper->itr.free = arm_spe_recording_free;
sper->itr.reference = arm_spe_reference;
sper->itr.read_finish = auxtrace_record__read_finish;
sper->itr.alignment = 0;
*err = 0;
return &sper->itr;
}
struct perf_event_attr
*arm_spe_pmu_default_config(struct perf_pmu *arm_spe_pmu)
{
struct perf_event_attr *attr;
attr = zalloc(sizeof(struct perf_event_attr));
if (!attr) {
pr_err("arm_spe default config cannot allocate a perf_event_attr\n");
return NULL;
}
/*
 * If the kernel driver doesn't advertise a minimum,
 * use the maximum allowable by PMSIDR_EL1.INTERVAL.
*/
if (perf_pmu__scan_file(arm_spe_pmu, "caps/min_interval", "%llu",
&attr->sample_period) != 1) {
pr_debug("arm_spe driver doesn't advertise a min. interval. Using 4096\n");
attr->sample_period = 4096;
}
arm_spe_pmu->selectable = true;
arm_spe_pmu->is_uncore = false;
return attr;
}
| linux-master | tools/perf/arch/arm64/util/arm-spe.c |
// SPDX-License-Identifier: GPL-2.0
#include "map_symbol.h"
#include "mem-events.h"
#define E(t, n, s) { .tag = t, .name = n, .sysfs_name = s }
static struct perf_mem_event perf_mem_events[PERF_MEM_EVENTS__MAX] = {
E("spe-load", "arm_spe_0/ts_enable=1,pa_enable=1,load_filter=1,store_filter=0,min_latency=%u/", "arm_spe_0"),
E("spe-store", "arm_spe_0/ts_enable=1,pa_enable=1,load_filter=0,store_filter=1/", "arm_spe_0"),
E("spe-ldst", "arm_spe_0/ts_enable=1,pa_enable=1,load_filter=1,store_filter=1,min_latency=%u/", "arm_spe_0"),
};
static char mem_ev_name[100];
struct perf_mem_event *perf_mem_events__ptr(int i)
{
	/* Guard against out-of-range indices, including negative ones. */
	if (i < 0 || i >= PERF_MEM_EVENTS__MAX)
		return NULL;
return &perf_mem_events[i];
}
const char *perf_mem_events__name(int i, const char *pmu_name __maybe_unused)
{
struct perf_mem_event *e = perf_mem_events__ptr(i);
	if (!e)
		return NULL;
if (i == PERF_MEM_EVENTS__LOAD || i == PERF_MEM_EVENTS__LOAD_STORE)
scnprintf(mem_ev_name, sizeof(mem_ev_name),
e->name, perf_mem_events__loads_ldlat);
else /* PERF_MEM_EVENTS__STORE */
scnprintf(mem_ev_name, sizeof(mem_ev_name), e->name);
return mem_ev_name;
}
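/*
 * Illustrative expansion (not part of the original source): assuming
 * perf_mem_events__loads_ldlat is 30,
 * perf_mem_events__name(PERF_MEM_EVENTS__LOAD, NULL) yields
 *   "arm_spe_0/ts_enable=1,pa_enable=1,load_filter=1,store_filter=0,min_latency=30/"
 * while the store event name, which carries no %u, is emitted verbatim.
 */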
| linux-master | tools/perf/arch/arm64/util/mem-events.c |
// SPDX-License-Identifier: GPL-2.0
#include <elfutils/libdwfl.h>
#include "perf_regs.h"
#include "../../../util/unwind-libdw.h"
#include "../../../util/perf_regs.h"
#include "../../../util/sample.h"
bool libdw__arch_set_initial_registers(Dwfl_Thread *thread, void *arg)
{
struct unwind_info *ui = arg;
struct regs_dump *user_regs = &ui->sample->user_regs;
Dwarf_Word dwarf_regs[PERF_REG_ARM64_MAX], dwarf_pc;
#define REG(r) ({ \
Dwarf_Word val = 0; \
perf_reg_value(&val, user_regs, PERF_REG_ARM64_##r); \
val; \
})
dwarf_regs[0] = REG(X0);
dwarf_regs[1] = REG(X1);
dwarf_regs[2] = REG(X2);
dwarf_regs[3] = REG(X3);
dwarf_regs[4] = REG(X4);
dwarf_regs[5] = REG(X5);
dwarf_regs[6] = REG(X6);
dwarf_regs[7] = REG(X7);
dwarf_regs[8] = REG(X8);
dwarf_regs[9] = REG(X9);
dwarf_regs[10] = REG(X10);
dwarf_regs[11] = REG(X11);
dwarf_regs[12] = REG(X12);
dwarf_regs[13] = REG(X13);
dwarf_regs[14] = REG(X14);
dwarf_regs[15] = REG(X15);
dwarf_regs[16] = REG(X16);
dwarf_regs[17] = REG(X17);
dwarf_regs[18] = REG(X18);
dwarf_regs[19] = REG(X19);
dwarf_regs[20] = REG(X20);
dwarf_regs[21] = REG(X21);
dwarf_regs[22] = REG(X22);
dwarf_regs[23] = REG(X23);
dwarf_regs[24] = REG(X24);
dwarf_regs[25] = REG(X25);
dwarf_regs[26] = REG(X26);
dwarf_regs[27] = REG(X27);
dwarf_regs[28] = REG(X28);
dwarf_regs[29] = REG(X29);
dwarf_regs[30] = REG(LR);
dwarf_regs[31] = REG(SP);
if (!dwfl_thread_state_registers(thread, 0, PERF_REG_ARM64_MAX,
dwarf_regs))
return false;
dwarf_pc = REG(PC);
dwfl_thread_state_register_pc(thread, dwarf_pc);
return true;
}
| linux-master | tools/perf/arch/arm64/util/unwind-libdw.c |
// SPDX-License-Identifier: GPL-2.0
#include <errno.h>
#include <memory.h>
#include "../../../util/evsel.h"
#include "../../../util/kvm-stat.h"
#include "arm64_exception_types.h"
#include "debug.h"
define_exit_reasons_table(arm64_exit_reasons, kvm_arm_exception_type);
define_exit_reasons_table(arm64_trap_exit_reasons, kvm_arm_exception_class);
const char *kvm_trap_exit_reason = "esr_ec";
const char *vcpu_id_str = "id";
const char *kvm_exit_reason = "ret";
const char *kvm_entry_trace = "kvm:kvm_entry";
const char *kvm_exit_trace = "kvm:kvm_exit";
const char *kvm_events_tp[] = {
"kvm:kvm_entry",
"kvm:kvm_exit",
NULL,
};
static void event_get_key(struct evsel *evsel,
struct perf_sample *sample,
struct event_key *key)
{
key->info = 0;
key->key = evsel__intval(evsel, sample, kvm_exit_reason);
key->exit_reasons = arm64_exit_reasons;
/*
* TRAP exceptions carry exception class info in esr_ec field
 * and, hence, we need to use a different exit_reasons table to
 * properly decode the event's esr_ec.
*/
if (key->key == ARM_EXCEPTION_TRAP) {
key->key = evsel__intval(evsel, sample, kvm_trap_exit_reason);
key->exit_reasons = arm64_trap_exit_reasons;
}
}
static bool event_begin(struct evsel *evsel,
struct perf_sample *sample __maybe_unused,
struct event_key *key __maybe_unused)
{
return evsel__name_is(evsel, kvm_entry_trace);
}
static bool event_end(struct evsel *evsel,
struct perf_sample *sample,
struct event_key *key)
{
if (evsel__name_is(evsel, kvm_exit_trace)) {
event_get_key(evsel, sample, key);
return true;
}
return false;
}
static struct kvm_events_ops exit_events = {
.is_begin_event = event_begin,
.is_end_event = event_end,
.decode_key = exit_event_decode_key,
.name = "VM-EXIT"
};
struct kvm_reg_events_ops kvm_reg_events_ops[] = {
{
.name = "vmexit",
.ops = &exit_events,
},
{ NULL, NULL },
};
const char * const kvm_skip_events[] = {
NULL,
};
int cpu_isa_init(struct perf_kvm_stat *kvm, const char *cpuid __maybe_unused)
{
kvm->exit_reasons_isa = "arm64";
return 0;
}
| linux-master | tools/perf/arch/arm64/util/kvm-stat.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Mapping of DWARF debug register numbers into register names.
*
* Copyright (C) 2010 Will Deacon, ARM Ltd.
*/
#include <errno.h>
#include <stddef.h>
#include <string.h>
#include <dwarf-regs.h>
#include <linux/ptrace.h> /* for struct user_pt_regs */
#include <linux/stringify.h>
struct pt_regs_dwarfnum {
const char *name;
unsigned int dwarfnum;
};
#define REG_DWARFNUM_NAME(r, num) {.name = r, .dwarfnum = num}
#define GPR_DWARFNUM_NAME(num) \
{.name = __stringify(%x##num), .dwarfnum = num}
#define REG_DWARFNUM_END {.name = NULL, .dwarfnum = 0}
#define DWARFNUM2OFFSET(index) \
(index * sizeof((struct user_pt_regs *)0)->regs[0])
/*
* Reference:
* http://infocenter.arm.com/help/topic/com.arm.doc.ihi0057b/IHI0057B_aadwarf64.pdf
*/
static const struct pt_regs_dwarfnum regdwarfnum_table[] = {
GPR_DWARFNUM_NAME(0),
GPR_DWARFNUM_NAME(1),
GPR_DWARFNUM_NAME(2),
GPR_DWARFNUM_NAME(3),
GPR_DWARFNUM_NAME(4),
GPR_DWARFNUM_NAME(5),
GPR_DWARFNUM_NAME(6),
GPR_DWARFNUM_NAME(7),
GPR_DWARFNUM_NAME(8),
GPR_DWARFNUM_NAME(9),
GPR_DWARFNUM_NAME(10),
GPR_DWARFNUM_NAME(11),
GPR_DWARFNUM_NAME(12),
GPR_DWARFNUM_NAME(13),
GPR_DWARFNUM_NAME(14),
GPR_DWARFNUM_NAME(15),
GPR_DWARFNUM_NAME(16),
GPR_DWARFNUM_NAME(17),
GPR_DWARFNUM_NAME(18),
GPR_DWARFNUM_NAME(19),
GPR_DWARFNUM_NAME(20),
GPR_DWARFNUM_NAME(21),
GPR_DWARFNUM_NAME(22),
GPR_DWARFNUM_NAME(23),
GPR_DWARFNUM_NAME(24),
GPR_DWARFNUM_NAME(25),
GPR_DWARFNUM_NAME(26),
GPR_DWARFNUM_NAME(27),
GPR_DWARFNUM_NAME(28),
GPR_DWARFNUM_NAME(29),
REG_DWARFNUM_NAME("%lr", 30),
REG_DWARFNUM_NAME("%sp", 31),
REG_DWARFNUM_END,
};
/**
 * get_arch_regstr() - look up the register name from its DWARF register number
 * @n: the DWARF register number
 *
 * get_arch_regstr() returns the name of the register in struct
 * regdwarfnum_table from its DWARF register number. If the register is not
 * found in the table, this returns NULL.
*/
const char *get_arch_regstr(unsigned int n)
{
const struct pt_regs_dwarfnum *roff;
for (roff = regdwarfnum_table; roff->name != NULL; roff++)
if (roff->dwarfnum == n)
return roff->name;
return NULL;
}
int regs_query_register_offset(const char *name)
{
const struct pt_regs_dwarfnum *roff;
for (roff = regdwarfnum_table; roff->name != NULL; roff++)
if (!strcmp(roff->name, name))
return DWARFNUM2OFFSET(roff->dwarfnum);
return -EINVAL;
}
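/*
 * Worked example (illustrative only, not part of the original source): "%x2"
 * maps to DWARF number 2, so regs_query_register_offset("%x2") returns
 * 2 * sizeof(regs[0]) == 16, i.e. the byte offset of regs[2] within
 * struct user_pt_regs. Likewise "%sp" (DWARF number 31) yields 31 * 8 == 248.
 */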
| linux-master | tools/perf/arch/arm64/util/dwarf-regs.c |
// SPDX-License-Identifier: GPL-2.0
#include <inttypes.h>
#include <stdio.h>
#include <string.h>
#include "debug.h"
#include "symbol.h"
#include "callchain.h"
#include "perf_regs.h"
#include "record.h"
#include "util/perf_regs.h"
void arch__add_leaf_frame_record_opts(struct record_opts *opts)
{
opts->sample_user_regs |= sample_reg_masks[PERF_REG_ARM64_LR].mask;
}
| linux-master | tools/perf/arch/arm64/util/machine.c |
// SPDX-License-Identifier: GPL-2.0
#include <errno.h>
#ifndef REMOTE_UNWIND_LIBUNWIND
#include <libunwind.h>
#include "perf_regs.h"
#include "../../../util/unwind.h"
#endif
#include "../../../util/debug.h"
int LIBUNWIND__ARCH_REG_ID(int regnum)
{
if (regnum < 0 || regnum >= PERF_REG_ARM64_EXTENDED_MAX)
return -EINVAL;
return regnum;
}
| linux-master | tools/perf/arch/arm64/util/unwind-libunwind.c |
#include <linux/kernel.h>
#include <linux/bits.h>
#include <linux/bitfield.h>
#include <stdio.h>
#include <stdlib.h>
#include <perf/cpumap.h>
#include <util/cpumap.h>
#include <internal/cpumap.h>
#include <api/fs/fs.h>
#include <errno.h>
#include "debug.h"
#include "header.h"
#define MIDR "/regs/identification/midr_el1"
#define MIDR_SIZE 19
#define MIDR_REVISION_MASK GENMASK(3, 0)
#define MIDR_VARIANT_MASK GENMASK(23, 20)
static int _get_cpuid(char *buf, size_t sz, struct perf_cpu_map *cpus)
{
const char *sysfs = sysfs__mountpoint();
int cpu;
int ret = EINVAL;
if (!sysfs || sz < MIDR_SIZE)
return EINVAL;
cpus = perf_cpu_map__get(cpus);
for (cpu = 0; cpu < perf_cpu_map__nr(cpus); cpu++) {
char path[PATH_MAX];
FILE *file;
scnprintf(path, PATH_MAX, "%s/devices/system/cpu/cpu%d" MIDR,
sysfs, RC_CHK_ACCESS(cpus)->map[cpu].cpu);
file = fopen(path, "r");
if (!file) {
pr_debug("fopen failed for file %s\n", path);
continue;
}
if (!fgets(buf, MIDR_SIZE, file)) {
fclose(file);
continue;
}
fclose(file);
/* Got the MIDR, stop looking. */
ret = 0;
break;
}
perf_cpu_map__put(cpus);
return ret;
}
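/*
 * Illustrative example (not part of the original source): with sysfs mounted
 * at /sys, CPU 0's MIDR is read from
 *   /sys/devices/system/cpu/cpu0/regs/identification/midr_el1
 * which contains a string such as "0x00000000410fd0c0" - 18 visible
 * characters, hence MIDR_SIZE of 19 to leave room for the terminating NUL.
 */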
int get_cpuid(char *buf, size_t sz)
{
struct perf_cpu_map *cpus = perf_cpu_map__new(NULL);
int ret;
if (!cpus)
return EINVAL;
ret = _get_cpuid(buf, sz, cpus);
perf_cpu_map__put(cpus);
return ret;
}
char *get_cpuid_str(struct perf_pmu *pmu)
{
char *buf = NULL;
int res;
if (!pmu || !pmu->cpus)
return NULL;
buf = malloc(MIDR_SIZE);
if (!buf)
return NULL;
/* Read the MIDR from the list of CPUs mapped to this PMU. */
res = _get_cpuid(buf, MIDR_SIZE, pmu->cpus);
if (res) {
pr_err("failed to get cpuid string for PMU %s\n", pmu->name);
free(buf);
buf = NULL;
}
return buf;
}
/*
 * Return 0 if idstr is a higher or equal version of the same part as
 * mapcpuid. Therefore, if mapcpuid has 0 for both revision and variant, then
 * any version of idstr will match as long as it's the same CPU type.
*
* Return 1 if the CPU type is different or the version of idstr is lower.
*/
int strcmp_cpuid_str(const char *mapcpuid, const char *idstr)
{
u64 map_id = strtoull(mapcpuid, NULL, 16);
char map_id_variant = FIELD_GET(MIDR_VARIANT_MASK, map_id);
char map_id_revision = FIELD_GET(MIDR_REVISION_MASK, map_id);
u64 id = strtoull(idstr, NULL, 16);
char id_variant = FIELD_GET(MIDR_VARIANT_MASK, id);
char id_revision = FIELD_GET(MIDR_REVISION_MASK, id);
u64 id_fields = ~(MIDR_VARIANT_MASK | MIDR_REVISION_MASK);
/* Compare without version first */
if ((map_id & id_fields) != (id & id_fields))
return 1;
/*
* ID matches, now compare version.
*
 * Arm revisions (like r0p0) are compared here like two-digit semver
 * values, e.g. 1.3 < 2.0 < 2.1 < 2.2.
*
* r = high value = 'Variant' field in MIDR
* p = low value = 'Revision' field in MIDR
*
*/
if (id_variant > map_id_variant)
return 0;
if (id_variant == map_id_variant && id_revision >= map_id_revision)
return 0;
/*
 * The variant is less than the mapfile variant, or the variants are the
 * same but the revision is lower. Return no match.
*/
return 1;
}
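/*
 * Worked example (illustrative only, not part of the original source):
 * mapcpuid "0x00000000410fd480" decodes to variant 0, revision 0 (r0p0);
 * idstr "0x00000000411fd481" decodes to variant 1, revision 1 (r1p1).
 * Masking out both fields leaves 0x410fd480 in both cases, so the parts
 * match, and since r1p1 >= r0p0 the function returns 0 (match). Swapping the
 * two arguments would return 1.
 */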
| linux-master | tools/perf/arch/arm64/util/header.c |
// SPDX-License-Identifier: GPL-2.0
#include <errno.h>
#include <regex.h>
#include <string.h>
#include <sys/auxv.h>
#include <linux/kernel.h>
#include <linux/zalloc.h>
#include "perf_regs.h"
#include "../../../perf-sys.h"
#include "../../../util/debug.h"
#include "../../../util/event.h"
#include "../../../util/perf_regs.h"
#ifndef HWCAP_SVE
#define HWCAP_SVE (1 << 22)
#endif
const struct sample_reg sample_reg_masks[] = {
SMPL_REG(x0, PERF_REG_ARM64_X0),
SMPL_REG(x1, PERF_REG_ARM64_X1),
SMPL_REG(x2, PERF_REG_ARM64_X2),
SMPL_REG(x3, PERF_REG_ARM64_X3),
SMPL_REG(x4, PERF_REG_ARM64_X4),
SMPL_REG(x5, PERF_REG_ARM64_X5),
SMPL_REG(x6, PERF_REG_ARM64_X6),
SMPL_REG(x7, PERF_REG_ARM64_X7),
SMPL_REG(x8, PERF_REG_ARM64_X8),
SMPL_REG(x9, PERF_REG_ARM64_X9),
SMPL_REG(x10, PERF_REG_ARM64_X10),
SMPL_REG(x11, PERF_REG_ARM64_X11),
SMPL_REG(x12, PERF_REG_ARM64_X12),
SMPL_REG(x13, PERF_REG_ARM64_X13),
SMPL_REG(x14, PERF_REG_ARM64_X14),
SMPL_REG(x15, PERF_REG_ARM64_X15),
SMPL_REG(x16, PERF_REG_ARM64_X16),
SMPL_REG(x17, PERF_REG_ARM64_X17),
SMPL_REG(x18, PERF_REG_ARM64_X18),
SMPL_REG(x19, PERF_REG_ARM64_X19),
SMPL_REG(x20, PERF_REG_ARM64_X20),
SMPL_REG(x21, PERF_REG_ARM64_X21),
SMPL_REG(x22, PERF_REG_ARM64_X22),
SMPL_REG(x23, PERF_REG_ARM64_X23),
SMPL_REG(x24, PERF_REG_ARM64_X24),
SMPL_REG(x25, PERF_REG_ARM64_X25),
SMPL_REG(x26, PERF_REG_ARM64_X26),
SMPL_REG(x27, PERF_REG_ARM64_X27),
SMPL_REG(x28, PERF_REG_ARM64_X28),
SMPL_REG(x29, PERF_REG_ARM64_X29),
SMPL_REG(lr, PERF_REG_ARM64_LR),
SMPL_REG(sp, PERF_REG_ARM64_SP),
SMPL_REG(pc, PERF_REG_ARM64_PC),
SMPL_REG(vg, PERF_REG_ARM64_VG),
SMPL_REG_END
};
/* %xNUM */
#define SDT_OP_REGEX1 "^(x[1-2]?[0-9]|3[0-1])$"
/* [sp], [sp, NUM] */
#define SDT_OP_REGEX2 "^\\[sp(, )?([0-9]+)?\\]$"
static regex_t sdt_op_regex1, sdt_op_regex2;
static int sdt_init_op_regex(void)
{
static int initialized;
int ret = 0;
if (initialized)
return 0;
ret = regcomp(&sdt_op_regex1, SDT_OP_REGEX1, REG_EXTENDED);
if (ret)
goto error;
ret = regcomp(&sdt_op_regex2, SDT_OP_REGEX2, REG_EXTENDED);
if (ret)
goto free_regex1;
initialized = 1;
return 0;
free_regex1:
regfree(&sdt_op_regex1);
error:
pr_debug4("Regex compilation error.\n");
return ret;
}
/*
 * SDT marker arguments on Arm64 use %xREG or [sp, NUM]; currently only
 * these two formats are supported.
*/
int arch_sdt_arg_parse_op(char *old_op, char **new_op)
{
int ret, new_len;
regmatch_t rm[5];
ret = sdt_init_op_regex();
if (ret < 0)
return ret;
if (!regexec(&sdt_op_regex1, old_op, 3, rm, 0)) {
/* Extract xNUM */
new_len = 2; /* % NULL */
new_len += (int)(rm[1].rm_eo - rm[1].rm_so);
*new_op = zalloc(new_len);
if (!*new_op)
return -ENOMEM;
scnprintf(*new_op, new_len, "%%%.*s",
(int)(rm[1].rm_eo - rm[1].rm_so), old_op + rm[1].rm_so);
} else if (!regexec(&sdt_op_regex2, old_op, 5, rm, 0)) {
/* [sp], [sp, NUM] or [sp,NUM] */
new_len = 7; /* + ( % s p ) NULL */
/* If the argument is [sp], need to fill offset '0' */
if (rm[2].rm_so == -1)
new_len += 1;
else
new_len += (int)(rm[2].rm_eo - rm[2].rm_so);
*new_op = zalloc(new_len);
if (!*new_op)
return -ENOMEM;
if (rm[2].rm_so == -1)
scnprintf(*new_op, new_len, "+0(%%sp)");
else
scnprintf(*new_op, new_len, "+%.*s(%%sp)",
(int)(rm[2].rm_eo - rm[2].rm_so),
old_op + rm[2].rm_so);
} else {
pr_debug4("Skipping unsupported SDT argument: %s\n", old_op);
return SDT_ARG_SKIP;
}
return SDT_ARG_VALID;
}
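/*
 * Illustrative transformations (not part of the original source):
 *   "x3"       -> "%x3"      (matched by SDT_OP_REGEX1)
 *   "[sp]"     -> "+0(%sp)"  (matched by SDT_OP_REGEX2, offset filled with 0)
 *   "[sp, 16]" -> "+16(%sp)" (matched by SDT_OP_REGEX2)
 * Anything else, e.g. "[x0, 8]", is skipped with SDT_ARG_SKIP.
 */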
uint64_t arch__intr_reg_mask(void)
{
return PERF_REGS_MASK;
}
uint64_t arch__user_reg_mask(void)
{
struct perf_event_attr attr = {
.type = PERF_TYPE_HARDWARE,
.config = PERF_COUNT_HW_CPU_CYCLES,
.sample_type = PERF_SAMPLE_REGS_USER,
.disabled = 1,
.exclude_kernel = 1,
.sample_period = 1,
.sample_regs_user = PERF_REGS_MASK
};
int fd;
if (getauxval(AT_HWCAP) & HWCAP_SVE)
attr.sample_regs_user |= SMPL_REG_MASK(PERF_REG_ARM64_VG);
/*
 * Check if the PMU supports perf extended regs before
 * returning the register mask to sample.
*/
if (attr.sample_regs_user != PERF_REGS_MASK) {
event_attr_init(&attr);
fd = sys_perf_event_open(&attr, 0, -1, -1, 0);
if (fd != -1) {
close(fd);
return attr.sample_regs_user;
}
}
return PERF_REGS_MASK;
}
| linux-master | tools/perf/arch/arm64/util/perf_regs.c |
// SPDX-License-Identifier: GPL-2.0
#include <linux/types.h>
#include "../../../util/tsc.h"
u64 rdtsc(void)
{
u64 val;
/*
* According to ARM DDI 0487F.c, from Armv8.0 to Armv8.5 inclusive, the
* system counter is at least 56 bits wide; from Armv8.6, the counter
 * must be 64 bits wide. So the system counter could be less than 64
 * bits wide, in which case the flag 'cap_user_time_short' is set to
 * true.
*/
asm volatile("mrs %0, cntvct_el0" : "=r" (val));
return val;
}
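/*
 * Minimal usage sketch (illustrative only, not part of the original source):
 * deltas of this counter tick at the frequency reported by cntfrq_el0, e.g.
 *
 *   u64 t0 = rdtsc();
 *   do_work();
 *   u64 t1 = rdtsc();
 *   // elapsed seconds ~= (t1 - t0) / cntfrq
 *
 * do_work() and cntfrq are placeholders here, not symbols from this file.
 */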
| linux-master | tools/perf/arch/arm64/util/tsc.c |
// SPDX-License-Identifier: GPL-2.0
/*
* HiSilicon PCIe Trace and Tuning (PTT) support
* Copyright (c) 2022 HiSilicon Technologies Co., Ltd.
*/
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/bitops.h>
#include <linux/log2.h>
#include <linux/zalloc.h>
#include <time.h>
#include <internal/lib.h> // page_size
#include "../../../util/auxtrace.h"
#include "../../../util/cpumap.h"
#include "../../../util/debug.h"
#include "../../../util/event.h"
#include "../../../util/evlist.h"
#include "../../../util/evsel.h"
#include "../../../util/hisi-ptt.h"
#include "../../../util/pmu.h"
#include "../../../util/record.h"
#include "../../../util/session.h"
#include "../../../util/tsc.h"
#define KiB(x) ((x) * 1024)
#define MiB(x) ((x) * 1024 * 1024)
struct hisi_ptt_recording {
struct auxtrace_record itr;
struct perf_pmu *hisi_ptt_pmu;
struct evlist *evlist;
};
static size_t
hisi_ptt_info_priv_size(struct auxtrace_record *itr __maybe_unused,
struct evlist *evlist __maybe_unused)
{
return HISI_PTT_AUXTRACE_PRIV_SIZE;
}
static int hisi_ptt_info_fill(struct auxtrace_record *itr,
struct perf_session *session,
struct perf_record_auxtrace_info *auxtrace_info,
size_t priv_size)
{
struct hisi_ptt_recording *pttr =
container_of(itr, struct hisi_ptt_recording, itr);
struct perf_pmu *hisi_ptt_pmu = pttr->hisi_ptt_pmu;
if (priv_size != HISI_PTT_AUXTRACE_PRIV_SIZE)
return -EINVAL;
if (!session->evlist->core.nr_mmaps)
return -EINVAL;
auxtrace_info->type = PERF_AUXTRACE_HISI_PTT;
auxtrace_info->priv[0] = hisi_ptt_pmu->type;
return 0;
}
static int hisi_ptt_set_auxtrace_mmap_page(struct record_opts *opts)
{
bool privileged = perf_event_paranoid_check(-1);
if (!opts->full_auxtrace)
return 0;
if (opts->full_auxtrace && !opts->auxtrace_mmap_pages) {
if (privileged) {
opts->auxtrace_mmap_pages = MiB(16) / page_size;
} else {
opts->auxtrace_mmap_pages = KiB(128) / page_size;
if (opts->mmap_pages == UINT_MAX)
opts->mmap_pages = KiB(256) / page_size;
}
}
/* Validate auxtrace_mmap_pages */
if (opts->auxtrace_mmap_pages) {
size_t sz = opts->auxtrace_mmap_pages * (size_t)page_size;
size_t min_sz = KiB(8);
if (sz < min_sz || !is_power_of_2(sz)) {
pr_err("Invalid mmap size for HISI PTT: must be at least %zuKiB and a power of 2\n",
min_sz / 1024);
return -EINVAL;
}
}
return 0;
}
static int hisi_ptt_recording_options(struct auxtrace_record *itr,
struct evlist *evlist,
struct record_opts *opts)
{
struct hisi_ptt_recording *pttr =
container_of(itr, struct hisi_ptt_recording, itr);
struct perf_pmu *hisi_ptt_pmu = pttr->hisi_ptt_pmu;
struct evsel *evsel, *hisi_ptt_evsel = NULL;
struct evsel *tracking_evsel;
int err;
pttr->evlist = evlist;
evlist__for_each_entry(evlist, evsel) {
if (evsel->core.attr.type == hisi_ptt_pmu->type) {
if (hisi_ptt_evsel) {
pr_err("There may be only one " HISI_PTT_PMU_NAME "x event\n");
return -EINVAL;
}
evsel->core.attr.freq = 0;
evsel->core.attr.sample_period = 1;
evsel->needs_auxtrace_mmap = true;
hisi_ptt_evsel = evsel;
opts->full_auxtrace = true;
}
}
err = hisi_ptt_set_auxtrace_mmap_page(opts);
if (err)
return err;
/*
* To obtain the auxtrace buffer file descriptor, the auxtrace event
* must come first.
*/
evlist__to_front(evlist, hisi_ptt_evsel);
evsel__set_sample_bit(hisi_ptt_evsel, TIME);
/* Add dummy event to keep tracking */
err = parse_event(evlist, "dummy:u");
if (err)
return err;
tracking_evsel = evlist__last(evlist);
evlist__set_tracking_event(evlist, tracking_evsel);
tracking_evsel->core.attr.freq = 0;
tracking_evsel->core.attr.sample_period = 1;
evsel__set_sample_bit(tracking_evsel, TIME);
return 0;
}
static u64 hisi_ptt_reference(struct auxtrace_record *itr __maybe_unused)
{
return rdtsc();
}
static void hisi_ptt_recording_free(struct auxtrace_record *itr)
{
struct hisi_ptt_recording *pttr =
container_of(itr, struct hisi_ptt_recording, itr);
free(pttr);
}
struct auxtrace_record *hisi_ptt_recording_init(int *err,
struct perf_pmu *hisi_ptt_pmu)
{
struct hisi_ptt_recording *pttr;
if (!hisi_ptt_pmu) {
*err = -ENODEV;
return NULL;
}
pttr = zalloc(sizeof(*pttr));
if (!pttr) {
*err = -ENOMEM;
return NULL;
}
pttr->hisi_ptt_pmu = hisi_ptt_pmu;
pttr->itr.pmu = hisi_ptt_pmu;
pttr->itr.recording_options = hisi_ptt_recording_options;
pttr->itr.info_priv_size = hisi_ptt_info_priv_size;
pttr->itr.info_fill = hisi_ptt_info_fill;
pttr->itr.free = hisi_ptt_recording_free;
pttr->itr.reference = hisi_ptt_reference;
pttr->itr.read_finish = auxtrace_record__read_finish;
pttr->itr.alignment = 0;
*err = 0;
return &pttr->itr;
}
| linux-master | tools/perf/arch/arm64/util/hisi-ptt.c |
// SPDX-License-Identifier: GPL-2.0
#include <string.h>
#include "tests/tests.h"
#include "arch-tests.h"
DEFINE_SUITE("arm64 CPUID matching", cpuid_match);
struct test_suite *arch_tests[] = {
#ifdef HAVE_DWARF_UNWIND_SUPPORT
&suite__dwarf_unwind,
#endif
&suite__cpuid_match,
NULL,
};
| linux-master | tools/perf/arch/arm64/tests/arch-tests.c |
// SPDX-License-Identifier: GPL-2.0
#include <string.h>
#include "perf_regs.h"
#include "thread.h"
#include "map.h"
#include "maps.h"
#include "event.h"
#include "debug.h"
#include "tests/tests.h"
#define STACK_SIZE 8192
static int sample_ustack(struct perf_sample *sample,
struct thread *thread, u64 *regs)
{
struct stack_dump *stack = &sample->user_stack;
struct map *map;
unsigned long sp;
u64 stack_size, *buf;
buf = malloc(STACK_SIZE);
if (!buf) {
pr_debug("failed to allocate sample uregs data\n");
return -1;
}
sp = (unsigned long) regs[PERF_REG_ARM64_SP];
map = maps__find(thread__maps(thread), (u64)sp);
if (!map) {
pr_debug("failed to get stack map\n");
free(buf);
return -1;
}
stack_size = map__end(map) - sp;
stack_size = stack_size > STACK_SIZE ? STACK_SIZE : stack_size;
memcpy(buf, (void *) sp, stack_size);
stack->data = (char *) buf;
stack->size = stack_size;
return 0;
}
int test__arch_unwind_sample(struct perf_sample *sample,
struct thread *thread)
{
struct regs_dump *regs = &sample->user_regs;
u64 *buf;
buf = calloc(1, sizeof(u64) * PERF_REGS_MAX);
if (!buf) {
pr_debug("failed to allocate sample uregs data\n");
return -1;
}
perf_regs_load(buf);
regs->abi = PERF_SAMPLE_REGS_ABI;
regs->regs = buf;
regs->mask = PERF_REGS_MASK;
return sample_ustack(sample, thread, buf);
}
| linux-master | tools/perf/arch/arm64/tests/dwarf-unwind.c |
// SPDX-License-Identifier: GPL-2.0
#include <linux/compiler.h>
#include "arch-tests.h"
#include "tests/tests.h"
#include "util/header.h"
int test__cpuid_match(struct test_suite *test __maybe_unused,
int subtest __maybe_unused)
{
/* midr with no leading zeros matches */
if (strcmp_cpuid_str("0x410fd0c0", "0x00000000410fd0c0"))
return -1;
/* Upper case matches */
if (strcmp_cpuid_str("0x410fd0c0", "0x00000000410FD0C0"))
return -1;
/* r0p0 = r0p0 matches */
if (strcmp_cpuid_str("0x00000000410fd480", "0x00000000410fd480"))
return -1;
/* r0p1 > r0p0 matches */
if (strcmp_cpuid_str("0x00000000410fd480", "0x00000000410fd481"))
return -1;
/* r1p0 > r0p0 matches*/
if (strcmp_cpuid_str("0x00000000410fd480", "0x00000000411fd480"))
return -1;
/* r0p0 < r0p1 doesn't match */
if (!strcmp_cpuid_str("0x00000000410fd481", "0x00000000410fd480"))
return -1;
/* r0p0 < r1p0 doesn't match */
if (!strcmp_cpuid_str("0x00000000411fd480", "0x00000000410fd480"))
return -1;
/* Different CPU doesn't match */
if (!strcmp_cpuid_str("0x00000000410fd4c0", "0x00000000430f0af0"))
return -1;
return 0;
}
| linux-master | tools/perf/arch/arm64/tests/cpuid-match.c |
// SPDX-License-Identifier: GPL-2.0
#include <linux/compiler.h>
#include <sys/types.h>
#include <regex.h>
#include <stdlib.h>
struct arm64_annotate {
regex_t call_insn,
jump_insn;
};
static int arm64_mov__parse(struct arch *arch __maybe_unused,
struct ins_operands *ops,
struct map_symbol *ms __maybe_unused)
{
char *s = strchr(ops->raw, ','), *target, *endptr;
if (s == NULL)
return -1;
*s = '\0';
ops->source.raw = strdup(ops->raw);
*s = ',';
if (ops->source.raw == NULL)
return -1;
target = ++s;
ops->target.raw = strdup(target);
if (ops->target.raw == NULL)
goto out_free_source;
ops->target.addr = strtoull(target, &endptr, 16);
if (endptr == target)
goto out_free_target;
s = strchr(endptr, '<');
if (s == NULL)
goto out_free_target;
endptr = strchr(s + 1, '>');
if (endptr == NULL)
goto out_free_target;
*endptr = '\0';
*s = ' ';
ops->target.name = strdup(s);
*s = '<';
*endptr = '>';
if (ops->target.name == NULL)
goto out_free_target;
return 0;
out_free_target:
zfree(&ops->target.raw);
out_free_source:
zfree(&ops->source.raw);
return -1;
}
static int mov__scnprintf(struct ins *ins, char *bf, size_t size,
struct ins_operands *ops, int max_ins_name);
static struct ins_ops arm64_mov_ops = {
.parse = arm64_mov__parse,
.scnprintf = mov__scnprintf,
};
static struct ins_ops *arm64__associate_instruction_ops(struct arch *arch, const char *name)
{
struct arm64_annotate *arm = arch->priv;
struct ins_ops *ops;
regmatch_t match[2];
if (!regexec(&arm->jump_insn, name, 2, match, 0))
ops = &jump_ops;
else if (!regexec(&arm->call_insn, name, 2, match, 0))
ops = &call_ops;
else if (!strcmp(name, "ret"))
ops = &ret_ops;
else
ops = &arm64_mov_ops;
arch__associate_ins_ops(arch, name, ops);
return ops;
}
static int arm64__annotate_init(struct arch *arch, char *cpuid __maybe_unused)
{
struct arm64_annotate *arm;
int err;
if (arch->initialized)
return 0;
arm = zalloc(sizeof(*arm));
if (!arm)
return ENOMEM;
/* bl, blr */
err = regcomp(&arm->call_insn, "^blr?$", REG_EXTENDED);
if (err)
goto out_free_arm;
/* b, b.cond, br, cbz/cbnz, tbz/tbnz */
err = regcomp(&arm->jump_insn, "^[ct]?br?\\.?(cc|cs|eq|ge|gt|hi|hs|le|lo|ls|lt|mi|ne|pl|vc|vs)?n?z?$",
REG_EXTENDED);
if (err)
goto out_free_call;
arch->initialized = true;
arch->priv = arm;
arch->associate_instruction_ops = arm64__associate_instruction_ops;
arch->objdump.comment_char = '/';
arch->objdump.skip_functions_char = '+';
return 0;
out_free_call:
regfree(&arm->call_insn);
out_free_arm:
free(arm);
return SYMBOL_ANNOTATE_ERRNO__ARCH_INIT_REGEXP;
}
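/*
 * Illustrative matches (not part of the original source): "bl" and "blr" hit
 * the call regex; "b", "b.ne", "cbz" and "tbnz" hit the jump regex; "ret"
 * gets ret_ops; anything else, e.g. "ldr", falls back to arm64_mov_ops.
 */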
| linux-master | tools/perf/arch/arm64/annotate/instructions.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Context.c. Python interfaces for perf script.
*
* Copyright (C) 2010 Tom Zanussi <[email protected]>
*/
/*
* Use Py_ssize_t for '#' formats to avoid DeprecationWarning: PY_SSIZE_T_CLEAN
* will be required for '#' formats.
*/
#define PY_SSIZE_T_CLEAN
#include <Python.h>
#include "../../../util/trace-event.h"
#include "../../../util/event.h"
#include "../../../util/symbol.h"
#include "../../../util/thread.h"
#include "../../../util/map.h"
#include "../../../util/maps.h"
#include "../../../util/auxtrace.h"
#include "../../../util/session.h"
#include "../../../util/srcline.h"
#include "../../../util/srccode.h"
#if PY_MAJOR_VERSION < 3
#define _PyCapsule_GetPointer(arg1, arg2) \
PyCObject_AsVoidPtr(arg1)
#define _PyBytes_FromStringAndSize(arg1, arg2) \
PyString_FromStringAndSize((arg1), (arg2))
#define _PyUnicode_AsUTF8(arg) \
PyString_AsString(arg)
PyMODINIT_FUNC initperf_trace_context(void);
#else
#define _PyCapsule_GetPointer(arg1, arg2) \
PyCapsule_GetPointer((arg1), (arg2))
#define _PyBytes_FromStringAndSize(arg1, arg2) \
PyBytes_FromStringAndSize((arg1), (arg2))
#define _PyUnicode_AsUTF8(arg) \
PyUnicode_AsUTF8(arg)
PyMODINIT_FUNC PyInit_perf_trace_context(void);
#endif
static struct scripting_context *get_args(PyObject *args, const char *name, PyObject **arg2)
{
int cnt = 1 + !!arg2;
PyObject *context;
if (!PyArg_UnpackTuple(args, name, 1, cnt, &context, arg2))
return NULL;
return _PyCapsule_GetPointer(context, NULL);
}
static struct scripting_context *get_scripting_context(PyObject *args)
{
return get_args(args, "context", NULL);
}
#ifdef HAVE_LIBTRACEEVENT
static PyObject *perf_trace_context_common_pc(PyObject *obj, PyObject *args)
{
struct scripting_context *c = get_scripting_context(args);
if (!c)
return NULL;
return Py_BuildValue("i", common_pc(c));
}
static PyObject *perf_trace_context_common_flags(PyObject *obj,
PyObject *args)
{
struct scripting_context *c = get_scripting_context(args);
if (!c)
return NULL;
return Py_BuildValue("i", common_flags(c));
}
static PyObject *perf_trace_context_common_lock_depth(PyObject *obj,
PyObject *args)
{
struct scripting_context *c = get_scripting_context(args);
if (!c)
return NULL;
return Py_BuildValue("i", common_lock_depth(c));
}
#endif
static PyObject *perf_sample_insn(PyObject *obj, PyObject *args)
{
struct scripting_context *c = get_scripting_context(args);
if (!c)
return NULL;
if (c->sample->ip && !c->sample->insn_len && thread__maps(c->al->thread)) {
struct machine *machine = maps__machine(thread__maps(c->al->thread));
script_fetch_insn(c->sample, c->al->thread, machine);
}
if (!c->sample->insn_len)
Py_RETURN_NONE; /* N.B. This is a return statement */
return _PyBytes_FromStringAndSize(c->sample->insn, c->sample->insn_len);
}
static PyObject *perf_set_itrace_options(PyObject *obj, PyObject *args)
{
struct scripting_context *c;
const char *itrace_options;
int retval = -1;
PyObject *str;
c = get_args(args, "itrace_options", &str);
if (!c)
return NULL;
if (!c->session || !c->session->itrace_synth_opts)
goto out;
if (c->session->itrace_synth_opts->set) {
retval = 1;
goto out;
}
itrace_options = _PyUnicode_AsUTF8(str);
retval = itrace_do_parse_synth_opts(c->session->itrace_synth_opts, itrace_options, 0);
out:
return Py_BuildValue("i", retval);
}
static PyObject *perf_sample_src(PyObject *obj, PyObject *args, bool get_srccode)
{
struct scripting_context *c = get_scripting_context(args);
unsigned int line = 0;
char *srcfile = NULL;
char *srccode = NULL;
PyObject *result;
struct map *map;
struct dso *dso;
int len = 0;
u64 addr;
if (!c)
return NULL;
map = c->al->map;
addr = c->al->addr;
dso = map ? map__dso(map) : NULL;
if (dso)
srcfile = get_srcline_split(dso, map__rip_2objdump(map, addr), &line);
if (get_srccode) {
if (srcfile)
srccode = find_sourceline(srcfile, line, &len);
result = Py_BuildValue("(sIs#)", srcfile, line, srccode, (Py_ssize_t)len);
} else {
result = Py_BuildValue("(sI)", srcfile, line);
}
free(srcfile);
return result;
}
static PyObject *perf_sample_srcline(PyObject *obj, PyObject *args)
{
return perf_sample_src(obj, args, false);
}
static PyObject *perf_sample_srccode(PyObject *obj, PyObject *args)
{
return perf_sample_src(obj, args, true);
}
static PyMethodDef ContextMethods[] = {
#ifdef HAVE_LIBTRACEEVENT
{ "common_pc", perf_trace_context_common_pc, METH_VARARGS,
"Get the common preempt count event field value."},
{ "common_flags", perf_trace_context_common_flags, METH_VARARGS,
"Get the common flags event field value."},
{ "common_lock_depth", perf_trace_context_common_lock_depth,
METH_VARARGS, "Get the common lock depth event field value."},
#endif
{ "perf_sample_insn", perf_sample_insn,
METH_VARARGS, "Get the machine code instruction."},
{ "perf_set_itrace_options", perf_set_itrace_options,
METH_VARARGS, "Set --itrace options."},
{ "perf_sample_srcline", perf_sample_srcline,
METH_VARARGS, "Get source file name and line number."},
{ "perf_sample_srccode", perf_sample_srccode,
METH_VARARGS, "Get source file name, line number and line."},
{ NULL, NULL, 0, NULL}
};
#if PY_MAJOR_VERSION < 3
PyMODINIT_FUNC initperf_trace_context(void)
{
(void) Py_InitModule("perf_trace_context", ContextMethods);
}
#else
PyMODINIT_FUNC PyInit_perf_trace_context(void)
{
static struct PyModuleDef moduledef = {
PyModuleDef_HEAD_INIT,
"perf_trace_context", /* m_name */
"", /* m_doc */
-1, /* m_size */
ContextMethods, /* m_methods */
NULL, /* m_reload */
NULL, /* m_traverse */
NULL, /* m_clear */
NULL, /* m_free */
};
PyObject *mod;
mod = PyModule_Create(&moduledef);
/* Add perf_script_context to the module so it can be imported */
PyObject_SetAttrString(mod, "perf_script_context", Py_None);
return mod;
}
#endif
| linux-master | tools/perf/scripts/python/Perf-Trace-Util/Context.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* This file was generated automatically by ExtUtils::ParseXS version 2.18_02 from the
* contents of Context.xs. Do not edit this file, edit Context.xs instead.
*
* ANY CHANGES MADE HERE WILL BE LOST!
*/
#include <stdbool.h>
#ifndef HAS_BOOL
# define HAS_BOOL 1
#endif
#line 1 "Context.xs"
/*
* Context.xs. XS interfaces for perf script.
*
* Copyright (C) 2009 Tom Zanussi <[email protected]>
*/
#include "EXTERN.h"
#include "perl.h"
#include "XSUB.h"
#include "../../../util/trace-event.h"
#ifndef PERL_UNUSED_VAR
# define PERL_UNUSED_VAR(var) if (0) var = var
#endif
#line 42 "Context.c"
XS(XS_Perf__Trace__Context_common_pc); /* prototype to pass -Wmissing-prototypes */
XS(XS_Perf__Trace__Context_common_pc)
{
#ifdef dVAR
dVAR; dXSARGS;
#else
dXSARGS;
#endif
if (items != 1)
Perl_croak(aTHX_ "Usage: %s(%s)", "Perf::Trace::Context::common_pc", "context");
PERL_UNUSED_VAR(cv); /* -W */
{
struct scripting_context * context = INT2PTR(struct scripting_context *,SvIV(ST(0)));
int RETVAL;
dXSTARG;
RETVAL = common_pc(context);
XSprePUSH; PUSHi((IV)RETVAL);
}
XSRETURN(1);
}
XS(XS_Perf__Trace__Context_common_flags); /* prototype to pass -Wmissing-prototypes */
XS(XS_Perf__Trace__Context_common_flags)
{
#ifdef dVAR
dVAR; dXSARGS;
#else
dXSARGS;
#endif
if (items != 1)
Perl_croak(aTHX_ "Usage: %s(%s)", "Perf::Trace::Context::common_flags", "context");
PERL_UNUSED_VAR(cv); /* -W */
{
struct scripting_context * context = INT2PTR(struct scripting_context *,SvIV(ST(0)));
int RETVAL;
dXSTARG;
RETVAL = common_flags(context);
XSprePUSH; PUSHi((IV)RETVAL);
}
XSRETURN(1);
}
XS(XS_Perf__Trace__Context_common_lock_depth); /* prototype to pass -Wmissing-prototypes */
XS(XS_Perf__Trace__Context_common_lock_depth)
{
#ifdef dVAR
dVAR; dXSARGS;
#else
dXSARGS;
#endif
if (items != 1)
Perl_croak(aTHX_ "Usage: %s(%s)", "Perf::Trace::Context::common_lock_depth", "context");
PERL_UNUSED_VAR(cv); /* -W */
{
struct scripting_context * context = INT2PTR(struct scripting_context *,SvIV(ST(0)));
int RETVAL;
dXSTARG;
RETVAL = common_lock_depth(context);
XSprePUSH; PUSHi((IV)RETVAL);
}
XSRETURN(1);
}
#ifdef __cplusplus
extern "C"
#endif
XS(boot_Perf__Trace__Context); /* prototype to pass -Wmissing-prototypes */
XS(boot_Perf__Trace__Context)
{
#ifdef dVAR
dVAR; dXSARGS;
#else
dXSARGS;
#endif
const char* file = __FILE__;
PERL_UNUSED_VAR(cv); /* -W */
PERL_UNUSED_VAR(items); /* -W */
XS_VERSION_BOOTCHECK ;
newXSproto("Perf::Trace::Context::common_pc", XS_Perf__Trace__Context_common_pc, file, "$");
newXSproto("Perf::Trace::Context::common_flags", XS_Perf__Trace__Context_common_flags, file, "$");
newXSproto("Perf::Trace::Context::common_lock_depth", XS_Perf__Trace__Context_common_lock_depth, file, "$");
if (PL_unitcheckav)
call_list(PL_scopestack_ix, PL_unitcheckav);
XSRETURN_YES;
}
| linux-master | tools/perf/scripts/perl/Perf-Trace-Util/Context.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Minimal BPF debugger
*
* Minimal BPF debugger that mimics the kernel's engine (w/o extensions)
* and allows for single stepping through selected packets from a pcap
* with a provided user filter in order to facilitate verification of a
* BPF program. Besides others, this is useful to verify BPF programs
* before attaching to a live system, and can be used in socket filters,
* cls_bpf, xt_bpf, team driver and e.g. PTP code; in particular when a
* single more complex BPF program is being used. Reasons for a more
* complex BPF program are likely primarily to optimize execution time
* for making a verdict when multiple simple BPF programs are combined
* into one in order to prevent parsing same headers multiple times.
*
 * For more on how to debug BPF opcodes, see Documentation/networking/filter.rst,
 * which is the main document on BPF. Mini howto for getting started:
*
* 1) `./bpf_dbg` to enter the shell (shell cmds denoted with '>'):
* 2) > load bpf 6,40 0 0 12,21 0 3 20... (output from `bpf_asm` or
* `tcpdump -iem1 -ddd port 22 | tr '\n' ','` to load as filter)
* 3) > load pcap foo.pcap
* 4) > run <n>/disassemble/dump/quit (self-explanatory)
* 5) > breakpoint 2 (sets bp at loaded BPF insns 2, do `run` then;
* multiple bps can be set, of course, a call to `breakpoint`
* w/o args shows currently loaded bps, `breakpoint reset` for
* resetting all breakpoints)
* 6) > select 3 (`run` etc will start from the 3rd packet in the pcap)
* 7) > step [-<n>, +<n>] (performs single stepping through the BPF)
*
* Copyright 2013 Daniel Borkmann <[email protected]>
*/
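/*
 * Illustrative mini session (not part of the original source):
 *
 *   > load bpf 1,6 0 0 65535
 *   > disassemble
 *   l0:     ret #0xffff
 *
 * i.e. a single BPF_RET|BPF_K instruction (opcode 6) returning 0xffff,
 * which accepts whole packets.
 */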
#include <stdio.h>
#include <unistd.h>
#include <stdlib.h>
#include <ctype.h>
#include <stdbool.h>
#include <stdarg.h>
#include <setjmp.h>
#include <linux/filter.h>
#include <linux/if_packet.h>
#include <readline/readline.h>
#include <readline/history.h>
#include <sys/types.h>
#include <sys/socket.h>
#include <sys/stat.h>
#include <sys/mman.h>
#include <fcntl.h>
#include <errno.h>
#include <signal.h>
#include <arpa/inet.h>
#include <net/ethernet.h>
#define TCPDUMP_MAGIC 0xa1b2c3d4
#define BPF_LDX_B (BPF_LDX | BPF_B)
#define BPF_LDX_W (BPF_LDX | BPF_W)
#define BPF_JMP_JA (BPF_JMP | BPF_JA)
#define BPF_JMP_JEQ (BPF_JMP | BPF_JEQ)
#define BPF_JMP_JGT (BPF_JMP | BPF_JGT)
#define BPF_JMP_JGE (BPF_JMP | BPF_JGE)
#define BPF_JMP_JSET (BPF_JMP | BPF_JSET)
#define BPF_ALU_ADD (BPF_ALU | BPF_ADD)
#define BPF_ALU_SUB (BPF_ALU | BPF_SUB)
#define BPF_ALU_MUL (BPF_ALU | BPF_MUL)
#define BPF_ALU_DIV (BPF_ALU | BPF_DIV)
#define BPF_ALU_MOD (BPF_ALU | BPF_MOD)
#define BPF_ALU_NEG (BPF_ALU | BPF_NEG)
#define BPF_ALU_AND (BPF_ALU | BPF_AND)
#define BPF_ALU_OR (BPF_ALU | BPF_OR)
#define BPF_ALU_XOR (BPF_ALU | BPF_XOR)
#define BPF_ALU_LSH (BPF_ALU | BPF_LSH)
#define BPF_ALU_RSH (BPF_ALU | BPF_RSH)
#define BPF_MISC_TAX (BPF_MISC | BPF_TAX)
#define BPF_MISC_TXA (BPF_MISC | BPF_TXA)
#define BPF_LD_B (BPF_LD | BPF_B)
#define BPF_LD_H (BPF_LD | BPF_H)
#define BPF_LD_W (BPF_LD | BPF_W)
#ifndef array_size
# define array_size(x) (sizeof(x) / sizeof((x)[0]))
#endif
#ifndef __check_format_printf
# define __check_format_printf(pos_fmtstr, pos_fmtargs) \
__attribute__ ((format (printf, (pos_fmtstr), (pos_fmtargs))))
#endif
enum {
CMD_OK,
CMD_ERR,
CMD_EX,
};
struct shell_cmd {
const char *name;
int (*func)(char *args);
};
struct pcap_filehdr {
uint32_t magic;
uint16_t version_major;
uint16_t version_minor;
int32_t thiszone;
uint32_t sigfigs;
uint32_t snaplen;
uint32_t linktype;
};
struct pcap_timeval {
int32_t tv_sec;
int32_t tv_usec;
};
struct pcap_pkthdr {
struct pcap_timeval ts;
uint32_t caplen;
uint32_t len;
};
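/*
 * Illustrative file layout (not part of the original source): a pcap file is
 * one 24-byte pcap_filehdr (magic 0xa1b2c3d4 when written in native byte
 * order) followed by back-to-back records, each a 16-byte pcap_pkthdr plus
 * 'caplen' bytes of packet data. pcap_next_pkt() below walks exactly this
 * layout.
 */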
struct bpf_regs {
uint32_t A;
uint32_t X;
uint32_t M[BPF_MEMWORDS];
uint32_t R;
bool Rs;
uint16_t Pc;
};
static struct sock_filter bpf_image[BPF_MAXINSNS + 1];
static unsigned int bpf_prog_len;
static int bpf_breakpoints[64];
static struct bpf_regs bpf_regs[BPF_MAXINSNS + 1];
static struct bpf_regs bpf_curr;
static unsigned int bpf_regs_len;
static int pcap_fd = -1;
static unsigned int pcap_packet;
static size_t pcap_map_size;
static char *pcap_ptr_va_start, *pcap_ptr_va_curr;
static const char * const op_table[] = {
[BPF_ST] = "st",
[BPF_STX] = "stx",
[BPF_LD_B] = "ldb",
[BPF_LD_H] = "ldh",
[BPF_LD_W] = "ld",
[BPF_LDX] = "ldx",
[BPF_LDX_B] = "ldxb",
[BPF_JMP_JA] = "ja",
[BPF_JMP_JEQ] = "jeq",
[BPF_JMP_JGT] = "jgt",
[BPF_JMP_JGE] = "jge",
[BPF_JMP_JSET] = "jset",
[BPF_ALU_ADD] = "add",
[BPF_ALU_SUB] = "sub",
[BPF_ALU_MUL] = "mul",
[BPF_ALU_DIV] = "div",
[BPF_ALU_MOD] = "mod",
[BPF_ALU_NEG] = "neg",
[BPF_ALU_AND] = "and",
[BPF_ALU_OR] = "or",
[BPF_ALU_XOR] = "xor",
[BPF_ALU_LSH] = "lsh",
[BPF_ALU_RSH] = "rsh",
[BPF_MISC_TAX] = "tax",
[BPF_MISC_TXA] = "txa",
[BPF_RET] = "ret",
};
static __check_format_printf(1, 2) int rl_printf(const char *fmt, ...)
{
int ret;
va_list vl;
va_start(vl, fmt);
ret = vfprintf(rl_outstream, fmt, vl);
va_end(vl);
return ret;
}
static int matches(const char *cmd, const char *pattern)
{
int len = strlen(cmd);
if (len > strlen(pattern))
return -1;
return memcmp(pattern, cmd, len);
}
static void hex_dump(const uint8_t *buf, size_t len)
{
int i;
rl_printf("%3u: ", 0);
for (i = 0; i < len; i++) {
if (i && !(i % 16))
rl_printf("\n%3u: ", i);
rl_printf("%02x ", buf[i]);
}
rl_printf("\n");
}
static bool bpf_prog_loaded(void)
{
if (bpf_prog_len == 0)
rl_printf("no bpf program loaded!\n");
return bpf_prog_len > 0;
}
static void bpf_disasm(const struct sock_filter f, unsigned int i)
{
const char *op, *fmt;
int val = f.k;
char buf[256];
switch (f.code) {
case BPF_RET | BPF_K:
op = op_table[BPF_RET];
fmt = "#%#x";
break;
case BPF_RET | BPF_A:
op = op_table[BPF_RET];
fmt = "a";
break;
case BPF_RET | BPF_X:
op = op_table[BPF_RET];
fmt = "x";
break;
case BPF_MISC_TAX:
op = op_table[BPF_MISC_TAX];
fmt = "";
break;
case BPF_MISC_TXA:
op = op_table[BPF_MISC_TXA];
fmt = "";
break;
case BPF_ST:
op = op_table[BPF_ST];
fmt = "M[%d]";
break;
case BPF_STX:
op = op_table[BPF_STX];
fmt = "M[%d]";
break;
case BPF_LD_W | BPF_ABS:
op = op_table[BPF_LD_W];
fmt = "[%d]";
break;
case BPF_LD_H | BPF_ABS:
op = op_table[BPF_LD_H];
fmt = "[%d]";
break;
case BPF_LD_B | BPF_ABS:
op = op_table[BPF_LD_B];
fmt = "[%d]";
break;
case BPF_LD_W | BPF_LEN:
op = op_table[BPF_LD_W];
fmt = "#len";
break;
case BPF_LD_W | BPF_IND:
op = op_table[BPF_LD_W];
fmt = "[x+%d]";
break;
case BPF_LD_H | BPF_IND:
op = op_table[BPF_LD_H];
fmt = "[x+%d]";
break;
case BPF_LD_B | BPF_IND:
op = op_table[BPF_LD_B];
fmt = "[x+%d]";
break;
case BPF_LD | BPF_IMM:
op = op_table[BPF_LD_W];
fmt = "#%#x";
break;
case BPF_LDX | BPF_IMM:
op = op_table[BPF_LDX];
fmt = "#%#x";
break;
case BPF_LDX_B | BPF_MSH:
op = op_table[BPF_LDX_B];
fmt = "4*([%d]&0xf)";
break;
case BPF_LD | BPF_MEM:
op = op_table[BPF_LD_W];
fmt = "M[%d]";
break;
case BPF_LDX | BPF_MEM:
op = op_table[BPF_LDX];
fmt = "M[%d]";
break;
case BPF_JMP_JA:
op = op_table[BPF_JMP_JA];
fmt = "%d";
val = i + 1 + f.k;
break;
case BPF_JMP_JGT | BPF_X:
op = op_table[BPF_JMP_JGT];
fmt = "x";
break;
case BPF_JMP_JGT | BPF_K:
op = op_table[BPF_JMP_JGT];
fmt = "#%#x";
break;
case BPF_JMP_JGE | BPF_X:
op = op_table[BPF_JMP_JGE];
fmt = "x";
break;
case BPF_JMP_JGE | BPF_K:
op = op_table[BPF_JMP_JGE];
fmt = "#%#x";
break;
case BPF_JMP_JEQ | BPF_X:
op = op_table[BPF_JMP_JEQ];
fmt = "x";
break;
case BPF_JMP_JEQ | BPF_K:
op = op_table[BPF_JMP_JEQ];
fmt = "#%#x";
break;
case BPF_JMP_JSET | BPF_X:
op = op_table[BPF_JMP_JSET];
fmt = "x";
break;
case BPF_JMP_JSET | BPF_K:
op = op_table[BPF_JMP_JSET];
fmt = "#%#x";
break;
case BPF_ALU_NEG:
op = op_table[BPF_ALU_NEG];
fmt = "";
break;
case BPF_ALU_LSH | BPF_X:
op = op_table[BPF_ALU_LSH];
fmt = "x";
break;
case BPF_ALU_LSH | BPF_K:
op = op_table[BPF_ALU_LSH];
fmt = "#%d";
break;
case BPF_ALU_RSH | BPF_X:
op = op_table[BPF_ALU_RSH];
fmt = "x";
break;
case BPF_ALU_RSH | BPF_K:
op = op_table[BPF_ALU_RSH];
fmt = "#%d";
break;
case BPF_ALU_ADD | BPF_X:
op = op_table[BPF_ALU_ADD];
fmt = "x";
break;
case BPF_ALU_ADD | BPF_K:
op = op_table[BPF_ALU_ADD];
fmt = "#%d";
break;
case BPF_ALU_SUB | BPF_X:
op = op_table[BPF_ALU_SUB];
fmt = "x";
break;
case BPF_ALU_SUB | BPF_K:
op = op_table[BPF_ALU_SUB];
fmt = "#%d";
break;
case BPF_ALU_MUL | BPF_X:
op = op_table[BPF_ALU_MUL];
fmt = "x";
break;
case BPF_ALU_MUL | BPF_K:
op = op_table[BPF_ALU_MUL];
fmt = "#%d";
break;
case BPF_ALU_DIV | BPF_X:
op = op_table[BPF_ALU_DIV];
fmt = "x";
break;
case BPF_ALU_DIV | BPF_K:
op = op_table[BPF_ALU_DIV];
fmt = "#%d";
break;
case BPF_ALU_MOD | BPF_X:
op = op_table[BPF_ALU_MOD];
fmt = "x";
break;
case BPF_ALU_MOD | BPF_K:
op = op_table[BPF_ALU_MOD];
fmt = "#%d";
break;
case BPF_ALU_AND | BPF_X:
op = op_table[BPF_ALU_AND];
fmt = "x";
break;
case BPF_ALU_AND | BPF_K:
op = op_table[BPF_ALU_AND];
fmt = "#%#x";
break;
case BPF_ALU_OR | BPF_X:
op = op_table[BPF_ALU_OR];
fmt = "x";
break;
case BPF_ALU_OR | BPF_K:
op = op_table[BPF_ALU_OR];
fmt = "#%#x";
break;
case BPF_ALU_XOR | BPF_X:
op = op_table[BPF_ALU_XOR];
fmt = "x";
break;
case BPF_ALU_XOR | BPF_K:
op = op_table[BPF_ALU_XOR];
fmt = "#%#x";
break;
default:
op = "nosup";
fmt = "%#x";
val = f.code;
break;
}
memset(buf, 0, sizeof(buf));
snprintf(buf, sizeof(buf), fmt, val);
buf[sizeof(buf) - 1] = 0;
if ((BPF_CLASS(f.code) == BPF_JMP && BPF_OP(f.code) != BPF_JA))
rl_printf("l%d:\t%s %s, l%d, l%d\n", i, op, buf,
i + 1 + f.jt, i + 1 + f.jf);
else
rl_printf("l%d:\t%s %s\n", i, op, buf);
}
static void bpf_dump_curr(struct bpf_regs *r, struct sock_filter *f)
{
int i, m = 0;
rl_printf("pc: [%u]\n", r->Pc);
rl_printf("code: [%u] jt[%u] jf[%u] k[%u]\n",
f->code, f->jt, f->jf, f->k);
rl_printf("curr: ");
bpf_disasm(*f, r->Pc);
if (f->jt || f->jf) {
rl_printf("jt: ");
bpf_disasm(*(f + f->jt + 1), r->Pc + f->jt + 1);
rl_printf("jf: ");
bpf_disasm(*(f + f->jf + 1), r->Pc + f->jf + 1);
}
rl_printf("A: [%#08x][%u]\n", r->A, r->A);
rl_printf("X: [%#08x][%u]\n", r->X, r->X);
if (r->Rs)
rl_printf("ret: [%#08x][%u]!\n", r->R, r->R);
for (i = 0; i < BPF_MEMWORDS; i++) {
if (r->M[i]) {
m++;
rl_printf("M[%d]: [%#08x][%u]\n", i, r->M[i], r->M[i]);
}
}
if (m == 0)
rl_printf("M[0,%d]: [%#08x][%u]\n", BPF_MEMWORDS - 1, 0, 0);
}
static void bpf_dump_pkt(uint8_t *pkt, uint32_t pkt_caplen, uint32_t pkt_len)
{
if (pkt_caplen != pkt_len)
rl_printf("cap: %u, len: %u\n", pkt_caplen, pkt_len);
else
rl_printf("len: %u\n", pkt_len);
hex_dump(pkt, pkt_caplen);
}
static void bpf_disasm_all(const struct sock_filter *f, unsigned int len)
{
unsigned int i;
for (i = 0; i < len; i++)
bpf_disasm(f[i], i);
}
static void bpf_dump_all(const struct sock_filter *f, unsigned int len)
{
unsigned int i;
rl_printf("/* { op, jt, jf, k }, */\n");
for (i = 0; i < len; i++)
rl_printf("{ %#04x, %2u, %2u, %#010x },\n",
f[i].code, f[i].jt, f[i].jf, f[i].k);
}
static bool bpf_runnable(struct sock_filter *f, unsigned int len)
{
int sock, ret, i;
struct sock_fprog bpf = {
.filter = f,
.len = len,
};
sock = socket(AF_INET, SOCK_DGRAM, 0);
if (sock < 0) {
rl_printf("cannot open socket!\n");
return false;
}
ret = setsockopt(sock, SOL_SOCKET, SO_ATTACH_FILTER, &bpf, sizeof(bpf));
close(sock);
if (ret < 0) {
rl_printf("program not allowed to run by kernel!\n");
return false;
}
for (i = 0; i < len; i++) {
if (BPF_CLASS(f[i].code) == BPF_LD &&
f[i].k > SKF_AD_OFF) {
rl_printf("extensions currently not supported!\n");
return false;
}
}
return true;
}
static void bpf_reset_breakpoints(void)
{
int i;
for (i = 0; i < array_size(bpf_breakpoints); i++)
bpf_breakpoints[i] = -1;
}
static void bpf_set_breakpoints(unsigned int where)
{
int i;
bool set = false;
for (i = 0; i < array_size(bpf_breakpoints); i++) {
if (bpf_breakpoints[i] == (int) where) {
rl_printf("breakpoint already set!\n");
set = true;
break;
}
if (bpf_breakpoints[i] == -1 && set == false) {
bpf_breakpoints[i] = where;
set = true;
}
}
if (!set)
rl_printf("too many breakpoints set, reset first!\n");
}
static void bpf_dump_breakpoints(void)
{
int i;
rl_printf("breakpoints: ");
for (i = 0; i < array_size(bpf_breakpoints); i++) {
if (bpf_breakpoints[i] < 0)
continue;
rl_printf("%d ", bpf_breakpoints[i]);
}
rl_printf("\n");
}
static void bpf_reset(void)
{
bpf_regs_len = 0;
memset(bpf_regs, 0, sizeof(bpf_regs));
memset(&bpf_curr, 0, sizeof(bpf_curr));
}
static void bpf_safe_regs(void)
{
memcpy(&bpf_regs[bpf_regs_len++], &bpf_curr, sizeof(bpf_curr));
}
static bool bpf_restore_regs(int off)
{
unsigned int index = bpf_regs_len - 1 + off;
if (index == 0) {
bpf_reset();
return true;
} else if (index < bpf_regs_len) {
memcpy(&bpf_curr, &bpf_regs[index], sizeof(bpf_curr));
bpf_regs_len = index;
return true;
} else {
rl_printf("reached bottom of register history stack!\n");
return false;
}
}
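/*
 * Illustrative stepping example (not part of the original source):
 * bpf_safe_regs() pushes the current register state onto the history stack,
 * so after five executed instructions bpf_regs_len is 5. A step back of two
 * states, i.e. bpf_restore_regs(-2), restores bpf_regs[2] into bpf_curr and
 * trims the history back to length 2; bpf_restore_regs(-4) would land on
 * index 0 and fully reset the machine state.
 */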
static uint32_t extract_u32(uint8_t *pkt, uint32_t off)
{
uint32_t r;
memcpy(&r, &pkt[off], sizeof(r));
return ntohl(r);
}
static uint16_t extract_u16(uint8_t *pkt, uint32_t off)
{
uint16_t r;
memcpy(&r, &pkt[off], sizeof(r));
return ntohs(r);
}
static uint8_t extract_u8(uint8_t *pkt, uint32_t off)
{
return pkt[off];
}
static void set_return(struct bpf_regs *r)
{
r->R = 0;
r->Rs = true;
}
static void bpf_single_step(struct bpf_regs *r, struct sock_filter *f,
uint8_t *pkt, uint32_t pkt_caplen,
uint32_t pkt_len)
{
uint32_t K = f->k;
int d;
switch (f->code) {
case BPF_RET | BPF_K:
r->R = K;
r->Rs = true;
break;
case BPF_RET | BPF_A:
r->R = r->A;
r->Rs = true;
break;
case BPF_RET | BPF_X:
r->R = r->X;
r->Rs = true;
break;
case BPF_MISC_TAX:
r->X = r->A;
break;
case BPF_MISC_TXA:
r->A = r->X;
break;
case BPF_ST:
r->M[K] = r->A;
break;
case BPF_STX:
r->M[K] = r->X;
break;
case BPF_LD_W | BPF_ABS:
d = pkt_caplen - K;
if (d >= sizeof(uint32_t))
r->A = extract_u32(pkt, K);
else
set_return(r);
break;
case BPF_LD_H | BPF_ABS:
d = pkt_caplen - K;
if (d >= sizeof(uint16_t))
r->A = extract_u16(pkt, K);
else
set_return(r);
break;
case BPF_LD_B | BPF_ABS:
d = pkt_caplen - K;
if (d >= sizeof(uint8_t))
r->A = extract_u8(pkt, K);
else
set_return(r);
break;
case BPF_LD_W | BPF_IND:
	d = pkt_caplen - (r->X + K);
	if (d >= sizeof(uint32_t))
		r->A = extract_u32(pkt, r->X + K);
	else
		/* Out-of-bounds load terminates, as in the other BPF_IND cases. */
		set_return(r);
	break;
case BPF_LD_H | BPF_IND:
d = pkt_caplen - (r->X + K);
if (d >= sizeof(uint16_t))
r->A = extract_u16(pkt, r->X + K);
else
set_return(r);
break;
case BPF_LD_B | BPF_IND:
d = pkt_caplen - (r->X + K);
if (d >= sizeof(uint8_t))
r->A = extract_u8(pkt, r->X + K);
else
set_return(r);
break;
case BPF_LDX_B | BPF_MSH:
d = pkt_caplen - K;
if (d >= sizeof(uint8_t)) {
r->X = extract_u8(pkt, K);
r->X = (r->X & 0xf) << 2;
} else
set_return(r);
break;
case BPF_LD_W | BPF_LEN:
r->A = pkt_len;
break;
case BPF_LDX_W | BPF_LEN:
	/* ldx #len loads the packet length into X, not A. */
	r->X = pkt_len;
	break;
case BPF_LD | BPF_IMM:
r->A = K;
break;
case BPF_LDX | BPF_IMM:
r->X = K;
break;
case BPF_LD | BPF_MEM:
r->A = r->M[K];
break;
case BPF_LDX | BPF_MEM:
r->X = r->M[K];
break;
case BPF_JMP_JA:
r->Pc += K;
break;
case BPF_JMP_JGT | BPF_X:
r->Pc += r->A > r->X ? f->jt : f->jf;
break;
case BPF_JMP_JGT | BPF_K:
r->Pc += r->A > K ? f->jt : f->jf;
break;
case BPF_JMP_JGE | BPF_X:
r->Pc += r->A >= r->X ? f->jt : f->jf;
break;
case BPF_JMP_JGE | BPF_K:
r->Pc += r->A >= K ? f->jt : f->jf;
break;
case BPF_JMP_JEQ | BPF_X:
r->Pc += r->A == r->X ? f->jt : f->jf;
break;
case BPF_JMP_JEQ | BPF_K:
r->Pc += r->A == K ? f->jt : f->jf;
break;
case BPF_JMP_JSET | BPF_X:
r->Pc += r->A & r->X ? f->jt : f->jf;
break;
case BPF_JMP_JSET | BPF_K:
r->Pc += r->A & K ? f->jt : f->jf;
break;
case BPF_ALU_NEG:
r->A = -r->A;
break;
case BPF_ALU_LSH | BPF_X:
r->A <<= r->X;
break;
case BPF_ALU_LSH | BPF_K:
r->A <<= K;
break;
case BPF_ALU_RSH | BPF_X:
r->A >>= r->X;
break;
case BPF_ALU_RSH | BPF_K:
r->A >>= K;
break;
case BPF_ALU_ADD | BPF_X:
r->A += r->X;
break;
case BPF_ALU_ADD | BPF_K:
r->A += K;
break;
case BPF_ALU_SUB | BPF_X:
r->A -= r->X;
break;
case BPF_ALU_SUB | BPF_K:
r->A -= K;
break;
case BPF_ALU_MUL | BPF_X:
r->A *= r->X;
break;
case BPF_ALU_MUL | BPF_K:
r->A *= K;
break;
case BPF_ALU_DIV | BPF_X:
case BPF_ALU_MOD | BPF_X:
if (r->X == 0) {
set_return(r);
break;
}
goto do_div;
case BPF_ALU_DIV | BPF_K:
case BPF_ALU_MOD | BPF_K:
if (K == 0) {
set_return(r);
break;
}
do_div:
switch (f->code) {
case BPF_ALU_DIV | BPF_X:
r->A /= r->X;
break;
case BPF_ALU_DIV | BPF_K:
r->A /= K;
break;
case BPF_ALU_MOD | BPF_X:
r->A %= r->X;
break;
case BPF_ALU_MOD | BPF_K:
r->A %= K;
break;
}
break;
case BPF_ALU_AND | BPF_X:
r->A &= r->X;
break;
case BPF_ALU_AND | BPF_K:
r->A &= K;
break;
case BPF_ALU_OR | BPF_X:
r->A |= r->X;
break;
case BPF_ALU_OR | BPF_K:
r->A |= K;
break;
case BPF_ALU_XOR | BPF_X:
r->A ^= r->X;
break;
case BPF_ALU_XOR | BPF_K:
r->A ^= K;
break;
}
}
static bool bpf_pc_has_breakpoint(uint16_t pc)
{
int i;
for (i = 0; i < array_size(bpf_breakpoints); i++) {
if (bpf_breakpoints[i] < 0)
continue;
if (bpf_breakpoints[i] == pc)
return true;
}
return false;
}
static bool bpf_handle_breakpoint(struct bpf_regs *r, struct sock_filter *f,
uint8_t *pkt, uint32_t pkt_caplen,
uint32_t pkt_len)
{
rl_printf("-- register dump --\n");
bpf_dump_curr(r, &f[r->Pc]);
rl_printf("-- packet dump --\n");
bpf_dump_pkt(pkt, pkt_caplen, pkt_len);
rl_printf("(breakpoint)\n");
return true;
}
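/* Run the filter to completion or until a breakpoint fires; registers are
 * snapshotted before every instruction so that stepping backwards works.
 */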
static int bpf_run_all(struct sock_filter *f, uint16_t bpf_len, uint8_t *pkt,
uint32_t pkt_caplen, uint32_t pkt_len)
{
bool stop = false;
while (bpf_curr.Rs == false && stop == false) {
bpf_safe_regs();
if (bpf_pc_has_breakpoint(bpf_curr.Pc))
stop = bpf_handle_breakpoint(&bpf_curr, f, pkt,
pkt_caplen, pkt_len);
bpf_single_step(&bpf_curr, &f[bpf_curr.Pc], pkt, pkt_caplen,
pkt_len);
bpf_curr.Pc++;
}
return stop ? -1 : bpf_curr.R;
}
static int bpf_run_stepping(struct sock_filter *f, uint16_t bpf_len,
uint8_t *pkt, uint32_t pkt_caplen,
uint32_t pkt_len, int next)
{
bool stop = false;
int i = 1;
while (!bpf_curr.Rs && !stop) {
bpf_safe_regs();
if (i++ == next)
stop = bpf_handle_breakpoint(&bpf_curr, f, pkt,
pkt_caplen, pkt_len);
bpf_single_step(&bpf_curr, &f[bpf_curr.Pc], pkt, pkt_caplen,
pkt_len);
bpf_curr.Pc++;
}
return stop ? -1 : bpf_curr.R;
}
static bool pcap_loaded(void)
{
if (pcap_fd < 0)
rl_printf("no pcap file loaded!\n");
return pcap_fd >= 0;
}
static struct pcap_pkthdr *pcap_curr_pkt(void)
{
return (void *) pcap_ptr_va_curr;
}
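/* Advance to the next packet record, validating header and capture length
 * against the bounds of the mmap'ed pcap file.
 */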
static bool pcap_next_pkt(void)
{
struct pcap_pkthdr *hdr = pcap_curr_pkt();
if (pcap_ptr_va_curr + sizeof(*hdr) -
pcap_ptr_va_start >= pcap_map_size)
return false;
if (hdr->caplen == 0 || hdr->len == 0 || hdr->caplen > hdr->len)
return false;
if (pcap_ptr_va_curr + sizeof(*hdr) + hdr->caplen -
pcap_ptr_va_start >= pcap_map_size)
return false;
pcap_ptr_va_curr += (sizeof(*hdr) + hdr->caplen);
return true;
}
static void pcap_reset_pkt(void)
{
pcap_ptr_va_curr = pcap_ptr_va_start + sizeof(struct pcap_filehdr);
}
static int try_load_pcap(const char *file)
{
struct pcap_filehdr *hdr;
struct stat sb;
int ret;
pcap_fd = open(file, O_RDONLY);
if (pcap_fd < 0) {
rl_printf("cannot open pcap [%s]!\n", strerror(errno));
return CMD_ERR;
}
ret = fstat(pcap_fd, &sb);
if (ret < 0) {
rl_printf("cannot fstat pcap file!\n");
return CMD_ERR;
}
if (!S_ISREG(sb.st_mode)) {
rl_printf("not a regular pcap file, duh!\n");
return CMD_ERR;
}
pcap_map_size = sb.st_size;
if (pcap_map_size <= sizeof(struct pcap_filehdr)) {
rl_printf("pcap file too small!\n");
return CMD_ERR;
}
pcap_ptr_va_start = mmap(NULL, pcap_map_size, PROT_READ,
MAP_SHARED | MAP_LOCKED, pcap_fd, 0);
if (pcap_ptr_va_start == MAP_FAILED) {
rl_printf("mmap of file failed!");
return CMD_ERR;
}
hdr = (void *) pcap_ptr_va_start;
if (hdr->magic != TCPDUMP_MAGIC) {
rl_printf("wrong pcap magic!\n");
return CMD_ERR;
}
pcap_reset_pkt();
return CMD_OK;
}
static void try_close_pcap(void)
{
if (pcap_fd >= 0) {
munmap(pcap_ptr_va_start, pcap_map_size);
close(pcap_fd);
pcap_ptr_va_start = pcap_ptr_va_curr = NULL;
pcap_map_size = 0;
pcap_packet = 0;
pcap_fd = -1;
}
}
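/*
 * Parses opcode dumps in the "<len>,<code> <jt> <jf> <k>,..." format
 * emitted by bpf_asm, e.g. the ARP filter from
 * Documentation/networking/filter.rst:
 *
 *   4,40 0 0 12,21 0 1 2054,6 0 0 4294967295,6 0 0 0,
 */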
static int cmd_load_bpf(char *bpf_string)
{
char sp, *token, separator = ',';
unsigned short bpf_len, i = 0;
struct sock_filter tmp;
bpf_prog_len = 0;
memset(bpf_image, 0, sizeof(bpf_image));
if (sscanf(bpf_string, "%hu%c", &bpf_len, &sp) != 2 ||
sp != separator || bpf_len > BPF_MAXINSNS || bpf_len == 0) {
rl_printf("syntax error in head length encoding!\n");
return CMD_ERR;
}
token = bpf_string;
while ((token = strchr(token, separator)) && (++token)[0]) {
if (i >= bpf_len) {
rl_printf("program exceeds encoded length!\n");
return CMD_ERR;
}
if (sscanf(token, "%hu %hhu %hhu %u,",
&tmp.code, &tmp.jt, &tmp.jf, &tmp.k) != 4) {
rl_printf("syntax error at instruction %d!\n", i);
return CMD_ERR;
}
bpf_image[i].code = tmp.code;
bpf_image[i].jt = tmp.jt;
bpf_image[i].jf = tmp.jf;
bpf_image[i].k = tmp.k;
i++;
}
if (i != bpf_len) {
rl_printf("syntax error exceeding encoded length!\n");
return CMD_ERR;
} else
bpf_prog_len = bpf_len;
if (!bpf_runnable(bpf_image, bpf_prog_len))
bpf_prog_len = 0;
return CMD_OK;
}
static int cmd_load_pcap(char *file)
{
char *file_trim, *tmp;
file_trim = strtok_r(file, " ", &tmp);
if (file_trim == NULL)
return CMD_ERR;
try_close_pcap();
return try_load_pcap(file_trim);
}
static int cmd_load(char *arg)
{
char *subcmd, *cont = NULL, *tmp = strdup(arg);
int ret = CMD_OK;
subcmd = strtok_r(tmp, " ", &cont);
if (subcmd == NULL)
goto out;
if (matches(subcmd, "bpf") == 0) {
bpf_reset();
bpf_reset_breakpoints();
if (!cont)
ret = CMD_ERR;
else
ret = cmd_load_bpf(cont);
} else if (matches(subcmd, "pcap") == 0) {
ret = cmd_load_pcap(cont);
} else {
out:
rl_printf("bpf <code>: load bpf code\n");
rl_printf("pcap <file>: load pcap file\n");
ret = CMD_ERR;
}
free(tmp);
return ret;
}
static int cmd_step(char *num)
{
struct pcap_pkthdr *hdr;
int steps, ret;
if (!bpf_prog_loaded() || !pcap_loaded())
return CMD_ERR;
steps = strtol(num, NULL, 10);
if (steps == 0 || strlen(num) == 0)
steps = 1;
if (steps < 0) {
if (!bpf_restore_regs(steps))
return CMD_ERR;
steps = 1;
}
hdr = pcap_curr_pkt();
ret = bpf_run_stepping(bpf_image, bpf_prog_len,
(uint8_t *) hdr + sizeof(*hdr),
hdr->caplen, hdr->len, steps);
if (ret >= 0 || bpf_curr.Rs) {
bpf_reset();
if (!pcap_next_pkt()) {
rl_printf("(going back to first packet)\n");
pcap_reset_pkt();
} else {
rl_printf("(next packet)\n");
}
}
return CMD_OK;
}
static int cmd_select(char *num)
{
unsigned int which, i;
bool have_next = true;
if (!pcap_loaded() || strlen(num) == 0)
return CMD_ERR;
which = strtoul(num, NULL, 10);
if (which == 0) {
rl_printf("packet count starts with 1, clamping!\n");
which = 1;
}
pcap_reset_pkt();
bpf_reset();
	for (i = 1; i < which && (have_next = pcap_next_pkt()); i++)
/* noop */;
if (!have_next || pcap_curr_pkt() == NULL) {
rl_printf("no packet #%u available!\n", which);
pcap_reset_pkt();
return CMD_ERR;
}
return CMD_OK;
}
static int cmd_breakpoint(char *subcmd)
{
if (!bpf_prog_loaded())
return CMD_ERR;
if (strlen(subcmd) == 0)
bpf_dump_breakpoints();
else if (matches(subcmd, "reset") == 0)
bpf_reset_breakpoints();
else {
unsigned int where = strtoul(subcmd, NULL, 10);
if (where < bpf_prog_len) {
bpf_set_breakpoints(where);
rl_printf("breakpoint at: ");
bpf_disasm(bpf_image[where], where);
}
}
return CMD_OK;
}
static int cmd_run(char *num)
{
static uint32_t pass, fail;
bool has_limit = true;
int pkts = 0, i = 0;
if (!bpf_prog_loaded() || !pcap_loaded())
return CMD_ERR;
pkts = strtol(num, NULL, 10);
if (pkts == 0 || strlen(num) == 0)
has_limit = false;
do {
struct pcap_pkthdr *hdr = pcap_curr_pkt();
int ret = bpf_run_all(bpf_image, bpf_prog_len,
(uint8_t *) hdr + sizeof(*hdr),
hdr->caplen, hdr->len);
if (ret > 0)
pass++;
else if (ret == 0)
fail++;
else
return CMD_OK;
bpf_reset();
} while (pcap_next_pkt() && (!has_limit || (++i < pkts)));
rl_printf("bpf passes:%u fails:%u\n", pass, fail);
pcap_reset_pkt();
bpf_reset();
pass = fail = 0;
return CMD_OK;
}
static int cmd_disassemble(char *line_string)
{
bool single_line = false;
unsigned long line;
if (!bpf_prog_loaded())
return CMD_ERR;
if (strlen(line_string) > 0 &&
(line = strtoul(line_string, NULL, 10)) < bpf_prog_len)
single_line = true;
if (single_line)
bpf_disasm(bpf_image[line], line);
else
bpf_disasm_all(bpf_image, bpf_prog_len);
return CMD_OK;
}
static int cmd_dump(char *dontcare)
{
if (!bpf_prog_loaded())
return CMD_ERR;
bpf_dump_all(bpf_image, bpf_prog_len);
return CMD_OK;
}
static int cmd_quit(char *dontcare)
{
return CMD_EX;
}
static const struct shell_cmd cmds[] = {
{ .name = "load", .func = cmd_load },
{ .name = "select", .func = cmd_select },
{ .name = "step", .func = cmd_step },
{ .name = "run", .func = cmd_run },
{ .name = "breakpoint", .func = cmd_breakpoint },
{ .name = "disassemble", .func = cmd_disassemble },
{ .name = "dump", .func = cmd_dump },
{ .name = "quit", .func = cmd_quit },
};
static int execf(char *arg)
{
char *cmd, *cont, *tmp = strdup(arg);
int i, ret = 0, len;
cmd = strtok_r(tmp, " ", &cont);
if (cmd == NULL)
goto out;
len = strlen(cmd);
for (i = 0; i < array_size(cmds); i++) {
if (len != strlen(cmds[i].name))
continue;
if (strncmp(cmds[i].name, cmd, len) == 0) {
ret = cmds[i].func(cont);
break;
}
}
out:
free(tmp);
return ret;
}
static char *shell_comp_gen(const char *buf, int state)
{
static int list_index, len;
if (!state) {
list_index = 0;
len = strlen(buf);
}
for (; list_index < array_size(cmds); ) {
const char *name = cmds[list_index].name;
list_index++;
if (strncmp(name, buf, len) == 0)
return strdup(name);
}
return NULL;
}
static char **shell_completion(const char *buf, int start, int end)
{
char **matches = NULL;
if (start == 0)
matches = rl_completion_matches(buf, shell_comp_gen);
return matches;
}
static void intr_shell(int sig)
{
if (rl_end)
rl_kill_line(-1, 0);
rl_crlf();
rl_refresh_line(0, 0);
rl_free_line_state();
}
static void init_shell(FILE *fin, FILE *fout)
{
char file[128];
snprintf(file, sizeof(file), "%s/.bpf_dbg_history", getenv("HOME"));
read_history(file);
rl_instream = fin;
rl_outstream = fout;
rl_readline_name = "bpf_dbg";
rl_terminal_name = getenv("TERM");
rl_catch_signals = 0;
rl_catch_sigwinch = 1;
rl_attempted_completion_function = shell_completion;
rl_bind_key('\t', rl_complete);
rl_bind_key_in_map('\t', rl_complete, emacs_meta_keymap);
rl_bind_key_in_map('\033', rl_complete, emacs_meta_keymap);
snprintf(file, sizeof(file), "%s/.bpf_dbg_init", getenv("HOME"));
rl_read_init_file(file);
rl_prep_terminal(0);
rl_set_signals();
signal(SIGINT, intr_shell);
}
static void exit_shell(FILE *fin, FILE *fout)
{
char file[128];
snprintf(file, sizeof(file), "%s/.bpf_dbg_history", getenv("HOME"));
write_history(file);
clear_history();
rl_deprep_terminal();
try_close_pcap();
if (fin != stdin)
fclose(fin);
if (fout != stdout)
fclose(fout);
}
static int run_shell_loop(FILE *fin, FILE *fout)
{
char *buf;
init_shell(fin, fout);
while ((buf = readline("> ")) != NULL) {
int ret = execf(buf);
if (ret == CMD_EX)
break;
if (ret == CMD_OK && strlen(buf) > 0)
add_history(buf);
free(buf);
}
exit_shell(fin, fout);
return 0;
}
int main(int argc, char **argv)
{
FILE *fin = NULL, *fout = NULL;
if (argc >= 2)
fin = fopen(argv[1], "r");
if (argc >= 3)
fout = fopen(argv[2], "w");
return run_shell_loop(fin ? : stdin, fout ? : stdout);
}
| linux-master | tools/bpf/bpf_dbg.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Minimal BPF JIT image disassembler
*
* Disassembles BPF JIT compiler emitted opcodes back to asm insn's for
* debugging or verification purposes.
*
* To get the disassembly of the JIT code, do the following:
*
* 1) `echo 2 > /proc/sys/net/core/bpf_jit_enable`
* 2) Load a BPF filter (e.g. `tcpdump -p -n -s 0 -i eth1 host 192.168.20.0/24`)
* 3) Run e.g. `bpf_jit_disasm -o` to read out the last JIT code
*
* Copyright 2013 Daniel Borkmann <[email protected]>
*/
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <assert.h>
#include <unistd.h>
#include <string.h>
#include <bfd.h>
#include <dis-asm.h>
#include <regex.h>
#include <fcntl.h>
#include <sys/klog.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <limits.h>
#include <tools/dis-asm-compat.h>
#define CMD_ACTION_SIZE_BUFFER 10
#define CMD_ACTION_READ_ALL 3
static void get_exec_path(char *tpath, size_t size)
{
char *path;
ssize_t len;
snprintf(tpath, size, "/proc/%d/exe", (int) getpid());
tpath[size - 1] = 0;
path = strdup(tpath);
assert(path);
	len = readlink(path, tpath, size - 1);
	assert(len > 0);
	tpath[len] = 0;
free(path);
}
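/* Disassemble the in-memory JIT image with libopcodes, using the bfd
 * arch/mach of our own executable (/proc/self/exe) as the target.
 */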
static void get_asm_insns(uint8_t *image, size_t len, int opcodes)
{
int count, i, pc = 0;
char tpath[PATH_MAX];
struct disassemble_info info;
disassembler_ftype disassemble;
bfd *bfdf;
memset(tpath, 0, sizeof(tpath));
get_exec_path(tpath, sizeof(tpath));
bfdf = bfd_openr(tpath, NULL);
assert(bfdf);
assert(bfd_check_format(bfdf, bfd_object));
init_disassemble_info_compat(&info, stdout,
(fprintf_ftype) fprintf,
fprintf_styled);
info.arch = bfd_get_arch(bfdf);
info.mach = bfd_get_mach(bfdf);
info.buffer = image;
info.buffer_length = len;
disassemble_init_for_target(&info);
#ifdef DISASM_FOUR_ARGS_SIGNATURE
disassemble = disassembler(info.arch,
bfd_big_endian(bfdf),
info.mach,
bfdf);
#else
disassemble = disassembler(bfdf);
#endif
assert(disassemble);
do {
printf("%4x:\t", pc);
count = disassemble(pc, &info);
if (opcodes) {
printf("\n\t");
for (i = 0; i < count; ++i)
printf("%02x ", (uint8_t) image[pc + i]);
}
printf("\n");
pc += count;
	} while (count > 0 && pc < len);
bfd_close(bfdf);
}
static char *get_klog_buff(unsigned int *klen)
{
int ret, len;
char *buff;
len = klogctl(CMD_ACTION_SIZE_BUFFER, NULL, 0);
if (len < 0)
return NULL;
buff = malloc(len);
if (!buff)
return NULL;
ret = klogctl(CMD_ACTION_READ_ALL, buff, len);
if (ret < 0) {
free(buff);
return NULL;
}
*klen = ret;
return buff;
}
static char *get_flog_buff(const char *file, unsigned int *klen)
{
int fd, ret, len;
struct stat fi;
char *buff;
fd = open(file, O_RDONLY);
if (fd < 0)
return NULL;
ret = fstat(fd, &fi);
if (ret < 0 || !S_ISREG(fi.st_mode))
goto out;
len = fi.st_size + 1;
buff = malloc(len);
if (!buff)
goto out;
memset(buff, 0, len);
ret = read(fd, buff, len - 1);
if (ret <= 0)
goto out_free;
close(fd);
*klen = ret;
return buff;
out_free:
free(buff);
out:
close(fd);
return NULL;
}
static char *get_log_buff(const char *file, unsigned int *klen)
{
return file ? get_flog_buff(file, klen) : get_klog_buff(klen);
}
static void put_log_buff(char *buff)
{
free(buff);
}
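/* Find the last "flen=... proglen=... pass=... image=..." line that the
 * JIT compiler emits with bpf_jit_enable set to 2 and parse the hex dump
 * that follows it back into a binary image.
 */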
static uint8_t *get_last_jit_image(char *haystack, size_t hlen,
unsigned int *ilen)
{
char *ptr, *pptr, *tmp;
off_t off = 0;
unsigned int proglen;
int ret, flen, pass, ulen = 0;
regmatch_t pmatch[1];
unsigned long base;
regex_t regex;
uint8_t *image;
if (hlen == 0)
return NULL;
ret = regcomp(®ex, "flen=[[:alnum:]]+ proglen=[[:digit:]]+ "
"pass=[[:digit:]]+ image=[[:xdigit:]]+", REG_EXTENDED);
assert(ret == 0);
ptr = haystack;
memset(pmatch, 0, sizeof(pmatch));
while (1) {
ret = regexec(®ex, ptr, 1, pmatch, 0);
if (ret == 0) {
ptr += pmatch[0].rm_eo;
off += pmatch[0].rm_eo;
assert(off < hlen);
} else
break;
}
ptr = haystack + off - (pmatch[0].rm_eo - pmatch[0].rm_so);
ret = sscanf(ptr, "flen=%d proglen=%u pass=%d image=%lx",
&flen, &proglen, &pass, &base);
if (ret != 4) {
regfree(®ex);
return NULL;
}
	if (proglen > 1000000) {
		printf("proglen of %u too big, stopping\n", proglen);
		regfree(&regex);
		return NULL;
	}
	image = malloc(proglen);
	if (!image) {
		printf("Out of memory\n");
		regfree(&regex);
		return NULL;
	}
memset(image, 0, proglen);
tmp = ptr = haystack + off;
while ((ptr = strtok(tmp, "\n")) != NULL && ulen < proglen) {
tmp = NULL;
if (!strstr(ptr, "JIT code"))
continue;
pptr = ptr;
while ((ptr = strstr(pptr, ":")))
pptr = ptr + 1;
ptr = pptr;
do {
image[ulen++] = (uint8_t) strtoul(pptr, &pptr, 16);
if (ptr == pptr) {
ulen--;
break;
}
if (ulen >= proglen)
break;
ptr = pptr;
} while (1);
}
assert(ulen == proglen);
printf("%u bytes emitted from JIT compiler (pass:%d, flen:%d)\n",
proglen, pass, flen);
printf("%lx + <x>:\n", base);
regfree(®ex);
*ilen = ulen;
return image;
}
static void usage(void)
{
printf("Usage: bpf_jit_disasm [...]\n");
printf(" -o Also display related opcodes (default: off).\n");
printf(" -O <file> Write binary image of code to file, don't disassemble to stdout.\n");
printf(" -f <file> Read last image dump from file or stdin (default: klog).\n");
printf(" -h Display this help.\n");
}
int main(int argc, char **argv)
{
unsigned int len, klen, opt, opcodes = 0;
char *kbuff, *file = NULL;
char *ofile = NULL;
int ofd;
ssize_t nr;
uint8_t *pos;
uint8_t *image = NULL;
while ((opt = getopt(argc, argv, "of:O:")) != -1) {
switch (opt) {
case 'o':
opcodes = 1;
break;
case 'O':
ofile = optarg;
break;
case 'f':
file = optarg;
break;
default:
usage();
return -1;
}
}
bfd_init();
kbuff = get_log_buff(file, &klen);
if (!kbuff) {
fprintf(stderr, "Could not retrieve log buffer!\n");
return -1;
}
image = get_last_jit_image(kbuff, klen, &len);
if (!image) {
fprintf(stderr, "No JIT image found!\n");
goto done;
}
if (!ofile) {
get_asm_insns(image, len, opcodes);
goto done;
}
ofd = open(ofile, O_WRONLY | O_CREAT | O_TRUNC, DEFFILEMODE);
if (ofd < 0) {
fprintf(stderr, "Could not open file %s for writing: ", ofile);
perror(NULL);
goto done;
}
pos = image;
do {
nr = write(ofd, pos, len);
if (nr < 0) {
fprintf(stderr, "Could not write data to %s: ", ofile);
perror(NULL);
goto done;
}
len -= nr;
pos += nr;
} while (len);
close(ofd);
done:
put_log_buff(kbuff);
free(image);
return 0;
}
| linux-master | tools/bpf/bpf_jit_disasm.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Minimal BPF assembler
*
* Instead of libpcap high-level filter expressions, it can be quite
* useful to define filters in low-level BPF assembler (that is kept
* close to Steven McCanne and Van Jacobson's original BPF paper).
* In particular for BPF JIT implementors, JIT security auditors, or
* just for defining BPF expressions that contain extensions which are
* not supported by compilers.
*
* How to get into it:
*
* 1) read Documentation/networking/filter.rst
* 2) Run `bpf_asm [-c] <filter-prog file>` to translate into binary
* blob that is loadable with xt_bpf, cls_bpf et al. Note: -c will
* pretty print a C-like construct.
*
* Copyright 2013 Daniel Borkmann <[email protected]>
*/
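/*
 * Example input, the ARP filter from Documentation/networking/filter.rst:
 *
 *   ldh [12]
 *   jne #0x806, drop
 *   ret #-1
 *   drop: ret #0
 */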
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
extern void bpf_asm_compile(FILE *fp, bool cstyle);
int main(int argc, char **argv)
{
FILE *fp = stdin;
bool cstyle = false;
int i;
for (i = 1; i < argc; i++) {
if (!strncmp("-c", argv[i], 2)) {
cstyle = true;
continue;
}
fp = fopen(argv[i], "r");
if (!fp) {
fp = stdin;
continue;
}
break;
}
bpf_asm_compile(fp, cstyle);
return 0;
}
| linux-master | tools/bpf/bpf_asm.c |
// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2019 Facebook
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include "runqslower.h"
#define TASK_RUNNING 0
#define BPF_F_CURRENT_CPU 0xffffffffULL
const volatile __u64 min_us = 0;
const volatile pid_t targ_pid = 0;
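/* per-task enqueue timestamp, kept in task-local storage */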
struct {
__uint(type, BPF_MAP_TYPE_TASK_STORAGE);
__uint(map_flags, BPF_F_NO_PREALLOC);
__type(key, int);
__type(value, u64);
} start SEC(".maps");
struct {
__uint(type, BPF_MAP_TYPE_PERF_EVENT_ARRAY);
__uint(key_size, sizeof(u32));
__uint(value_size, sizeof(u32));
} events SEC(".maps");
/* record enqueue timestamp */
__always_inline
static int trace_enqueue(struct task_struct *t)
{
u32 pid = t->pid;
u64 *ptr;
if (!pid || (targ_pid && targ_pid != pid))
return 0;
ptr = bpf_task_storage_get(&start, t, 0,
BPF_LOCAL_STORAGE_GET_F_CREATE);
if (!ptr)
return 0;
*ptr = bpf_ktime_get_ns();
return 0;
}
SEC("tp_btf/sched_wakeup")
int handle__sched_wakeup(u64 *ctx)
{
/* TP_PROTO(struct task_struct *p) */
struct task_struct *p = (void *)ctx[0];
return trace_enqueue(p);
}
SEC("tp_btf/sched_wakeup_new")
int handle__sched_wakeup_new(u64 *ctx)
{
/* TP_PROTO(struct task_struct *p) */
struct task_struct *p = (void *)ctx[0];
return trace_enqueue(p);
}
SEC("tp_btf/sched_switch")
int handle__sched_switch(u64 *ctx)
{
/* TP_PROTO(bool preempt, struct task_struct *prev,
* struct task_struct *next)
*/
struct task_struct *prev = (struct task_struct *)ctx[1];
struct task_struct *next = (struct task_struct *)ctx[2];
struct runq_event event = {};
u64 *tsp, delta_us;
long state;
u32 pid;
/* ivcsw: treat like an enqueue event and store timestamp */
if (prev->__state == TASK_RUNNING)
trace_enqueue(prev);
pid = next->pid;
/* For pid mismatch, save a bpf_task_storage_get */
if (!pid || (targ_pid && targ_pid != pid))
return 0;
/* fetch timestamp and calculate delta */
tsp = bpf_task_storage_get(&start, next, 0, 0);
if (!tsp)
return 0; /* missed enqueue */
delta_us = (bpf_ktime_get_ns() - *tsp) / 1000;
if (min_us && delta_us <= min_us)
return 0;
event.pid = pid;
event.delta_us = delta_us;
bpf_get_current_comm(&event.task, sizeof(event.task));
/* output */
bpf_perf_event_output(ctx, &events, BPF_F_CURRENT_CPU,
&event, sizeof(event));
bpf_task_storage_delete(&start, next);
return 0;
}
char LICENSE[] SEC("license") = "GPL";
| linux-master | tools/bpf/runqslower/runqslower.bpf.c |
// SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause)
// Copyright (c) 2019 Facebook
#include <argp.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>
#include <bpf/libbpf.h>
#include <bpf/bpf.h>
#include "runqslower.h"
#include "runqslower.skel.h"
struct env {
pid_t pid;
__u64 min_us;
bool verbose;
} env = {
.min_us = 10000,
};
const char *argp_program_version = "runqslower 0.1";
const char *argp_program_bug_address = "<[email protected]>";
const char argp_program_doc[] =
"runqslower Trace long process scheduling delays.\n"
" For Linux, uses eBPF, BPF CO-RE, libbpf, BTF.\n"
"\n"
"This script traces high scheduling delays between tasks being\n"
"ready to run and them running on CPU after that.\n"
"\n"
"USAGE: runqslower [-p PID] [min_us]\n"
"\n"
"EXAMPLES:\n"
" runqslower # trace run queue latency higher than 10000 us (default)\n"
" runqslower 1000 # trace run queue latency higher than 1000 us\n"
" runqslower -p 123 # trace pid 123 only\n";
static const struct argp_option opts[] = {
{ "pid", 'p', "PID", 0, "Process PID to trace"},
{ "verbose", 'v', NULL, 0, "Verbose debug output" },
{},
};
static error_t parse_arg(int key, char *arg, struct argp_state *state)
{
static int pos_args;
int pid;
long long min_us;
switch (key) {
case 'v':
env.verbose = true;
break;
case 'p':
errno = 0;
pid = strtol(arg, NULL, 10);
if (errno || pid <= 0) {
fprintf(stderr, "Invalid PID: %s\n", arg);
argp_usage(state);
}
env.pid = pid;
break;
case ARGP_KEY_ARG:
if (pos_args++) {
fprintf(stderr,
"Unrecognized positional argument: %s\n", arg);
argp_usage(state);
}
errno = 0;
min_us = strtoll(arg, NULL, 10);
if (errno || min_us <= 0) {
fprintf(stderr, "Invalid delay (in us): %s\n", arg);
argp_usage(state);
}
env.min_us = min_us;
break;
default:
return ARGP_ERR_UNKNOWN;
}
return 0;
}
int libbpf_print_fn(enum libbpf_print_level level,
const char *format, va_list args)
{
if (level == LIBBPF_DEBUG && !env.verbose)
return 0;
return vfprintf(stderr, format, args);
}
void handle_event(void *ctx, int cpu, void *data, __u32 data_sz)
{
const struct runq_event *e = data;
struct tm *tm;
char ts[32];
time_t t;
time(&t);
tm = localtime(&t);
strftime(ts, sizeof(ts), "%H:%M:%S", tm);
printf("%-8s %-16s %-6d %14llu\n", ts, e->task, e->pid, e->delta_us);
}
void handle_lost_events(void *ctx, int cpu, __u64 lost_cnt)
{
printf("Lost %llu events on CPU #%d!\n", lost_cnt, cpu);
}
int main(int argc, char **argv)
{
static const struct argp argp = {
.options = opts,
.parser = parse_arg,
.doc = argp_program_doc,
};
struct perf_buffer *pb = NULL;
struct runqslower_bpf *obj;
int err;
err = argp_parse(&argp, argc, argv, 0, NULL, NULL);
if (err)
return err;
libbpf_set_print(libbpf_print_fn);
/* Use libbpf 1.0 API mode */
libbpf_set_strict_mode(LIBBPF_STRICT_ALL);
obj = runqslower_bpf__open();
if (!obj) {
fprintf(stderr, "failed to open and/or load BPF object\n");
return 1;
}
/* initialize global data (filtering options) */
obj->rodata->targ_pid = env.pid;
obj->rodata->min_us = env.min_us;
err = runqslower_bpf__load(obj);
if (err) {
fprintf(stderr, "failed to load BPF object: %d\n", err);
goto cleanup;
}
err = runqslower_bpf__attach(obj);
if (err) {
fprintf(stderr, "failed to attach BPF programs\n");
goto cleanup;
}
printf("Tracing run queue latency higher than %llu us\n", env.min_us);
printf("%-8s %-16s %-6s %14s\n", "TIME", "COMM", "PID", "LAT(us)");
pb = perf_buffer__new(bpf_map__fd(obj->maps.events), 64,
handle_event, handle_lost_events, NULL, NULL);
err = libbpf_get_error(pb);
if (err) {
pb = NULL;
fprintf(stderr, "failed to open perf buffer: %d\n", err);
goto cleanup;
}
while ((err = perf_buffer__poll(pb, 100)) >= 0)
;
printf("Error polling perf buffer: %d\n", err);
cleanup:
perf_buffer__free(pb);
runqslower_bpf__destroy(obj);
return err != 0;
}
| linux-master | tools/bpf/runqslower/runqslower.c |
// SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause)
/*
* resolve_btfids scans ELF object for .BTF_ids section and resolves
* its symbols with BTF ID values.
*
* Each symbol points to 4 bytes data and is expected to have
* following name syntax:
*
* __BTF_ID__<type>__<symbol>[__<id>]
*
* type is:
*
* func - lookup BTF_KIND_FUNC symbol with <symbol> name
* and store its ID into the data:
*
* __BTF_ID__func__vfs_close__1:
* .zero 4
*
* struct - lookup BTF_KIND_STRUCT symbol with <symbol> name
* and store its ID into the data:
*
* __BTF_ID__struct__sk_buff__1:
* .zero 4
*
* union - lookup BTF_KIND_UNION symbol with <symbol> name
* and store its ID into the data:
*
* __BTF_ID__union__thread_union__1:
* .zero 4
*
* typedef - lookup BTF_KIND_TYPEDEF symbol with <symbol> name
* and store its ID into the data:
*
* __BTF_ID__typedef__pid_t__1:
* .zero 4
*
* set - store symbol size into first 4 bytes and sort following
* ID list
*
* __BTF_ID__set__list:
* .zero 4
* list:
* __BTF_ID__func__vfs_getattr__3:
* .zero 4
* __BTF_ID__func__vfs_fallocate__4:
* .zero 4
*
* set8 - store symbol size into first 4 bytes and sort following
* ID list
*
* __BTF_ID__set8__list:
* .zero 8
* list:
* __BTF_ID__func__vfs_getattr__3:
* .zero 4
* .word (1 << 0) | (1 << 2)
* __BTF_ID__func__vfs_fallocate__5:
* .zero 4
* .word (1 << 3) | (1 << 1) | (1 << 2)
*/
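/*
 * For reference, kernel code typically emits these symbols through the
 * macros in include/linux/btf_ids.h, e.g. (sketch):
 *
 *   BTF_ID_LIST(btf_sock_ids)
 *   BTF_ID(struct, sock)
 *
 * which expands to the __BTF_ID__struct__sock__<N> symbol layout
 * described above.
 */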
#define _GNU_SOURCE
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <stdlib.h>
#include <libelf.h>
#include <gelf.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <errno.h>
#include <linux/rbtree.h>
#include <linux/zalloc.h>
#include <linux/err.h>
#include <bpf/btf.h>
#include <bpf/libbpf.h>
#include <subcmd/parse-options.h>
#define BTF_IDS_SECTION ".BTF_ids"
#define BTF_ID "__BTF_ID__"
#define BTF_STRUCT "struct"
#define BTF_UNION "union"
#define BTF_TYPEDEF "typedef"
#define BTF_FUNC "func"
#define BTF_SET "set"
#define BTF_SET8 "set8"
#define ADDR_CNT 100
struct btf_id {
struct rb_node rb_node;
char *name;
union {
int id;
int cnt;
};
int addr_cnt;
bool is_set;
bool is_set8;
Elf64_Addr addr[ADDR_CNT];
};
struct object {
const char *path;
const char *btf;
const char *base_btf_path;
struct {
int fd;
Elf *elf;
Elf_Data *symbols;
Elf_Data *idlist;
int symbols_shndx;
int idlist_shndx;
size_t strtabidx;
unsigned long idlist_addr;
} efile;
struct rb_root sets;
struct rb_root structs;
struct rb_root unions;
struct rb_root typedefs;
struct rb_root funcs;
int nr_funcs;
int nr_structs;
int nr_unions;
int nr_typedefs;
};
static int verbose;
static int eprintf(int level, int var, const char *fmt, ...)
{
va_list args;
int ret = 0;
if (var >= level) {
va_start(args, fmt);
ret = vfprintf(stderr, fmt, args);
va_end(args);
}
return ret;
}
#ifndef pr_fmt
#define pr_fmt(fmt) fmt
#endif
#define pr_debug(fmt, ...) \
eprintf(1, verbose, pr_fmt(fmt), ##__VA_ARGS__)
#define pr_debugN(n, fmt, ...) \
eprintf(n, verbose, pr_fmt(fmt), ##__VA_ARGS__)
#define pr_debug2(fmt, ...) pr_debugN(2, pr_fmt(fmt), ##__VA_ARGS__)
#define pr_err(fmt, ...) \
eprintf(0, verbose, pr_fmt(fmt), ##__VA_ARGS__)
#define pr_info(fmt, ...) \
eprintf(0, verbose, pr_fmt(fmt), ##__VA_ARGS__)
static bool is_btf_id(const char *name)
{
return name && !strncmp(name, BTF_ID, sizeof(BTF_ID) - 1);
}
static struct btf_id *btf_id__find(struct rb_root *root, const char *name)
{
struct rb_node *p = root->rb_node;
struct btf_id *id;
int cmp;
while (p) {
id = rb_entry(p, struct btf_id, rb_node);
cmp = strcmp(id->name, name);
if (cmp < 0)
p = p->rb_left;
else if (cmp > 0)
p = p->rb_right;
else
return id;
}
return NULL;
}
static struct btf_id *
btf_id__add(struct rb_root *root, char *name, bool unique)
{
struct rb_node **p = &root->rb_node;
struct rb_node *parent = NULL;
struct btf_id *id;
int cmp;
while (*p != NULL) {
parent = *p;
id = rb_entry(parent, struct btf_id, rb_node);
cmp = strcmp(id->name, name);
if (cmp < 0)
p = &(*p)->rb_left;
else if (cmp > 0)
p = &(*p)->rb_right;
else
return unique ? NULL : id;
}
id = zalloc(sizeof(*id));
if (id) {
pr_debug("adding symbol %s\n", name);
id->name = name;
rb_link_node(&id->rb_node, parent, p);
rb_insert_color(&id->rb_node, root);
}
return id;
}
static char *get_id(const char *prefix_end)
{
/*
* __BTF_ID__func__vfs_truncate__0
* prefix_end = ^
* pos = ^
*/
int len = strlen(prefix_end);
int pos = sizeof("__") - 1;
char *p, *id;
if (pos >= len)
return NULL;
id = strdup(prefix_end + pos);
if (id) {
/*
* __BTF_ID__func__vfs_truncate__0
* id = ^
*
* cut the unique id part
*/
		p = strrchr(id, '_');
		if (!p || p == id || *(p - 1) != '_') {
			free(id);
			return NULL;
		}
		*(p - 1) = '\0';
}
return id;
}
static struct btf_id *add_set(struct object *obj, char *name, bool is_set8)
{
/*
* __BTF_ID__set__name
* name = ^
* id = ^
*/
char *id = name + (is_set8 ? sizeof(BTF_SET8 "__") : sizeof(BTF_SET "__")) - 1;
int len = strlen(name);
if (id >= name + len) {
pr_err("FAILED to parse set name: %s\n", name);
return NULL;
}
return btf_id__add(&obj->sets, id, true);
}
static struct btf_id *add_symbol(struct rb_root *root, char *name, size_t size)
{
char *id;
id = get_id(name + size);
if (!id) {
pr_err("FAILED to parse symbol name: %s\n", name);
return NULL;
}
return btf_id__add(root, id, false);
}
/* Older libelf.h and glibc elf.h might not yet define the ELF compression types. */
#ifndef SHF_COMPRESSED
#define SHF_COMPRESSED (1 << 11) /* Section with compressed data. */
#endif
/*
* The data of compressed section should be aligned to 4
* (for 32bit) or 8 (for 64 bit) bytes. The binutils ld
* sets sh_addralign to 1, which makes libelf fail with
* misaligned section error during the update:
* FAILED elf_update(WRITE): invalid section alignment
*
* While waiting for ld fix, we fix the compressed sections
 * sh_addralign value manually.
*/
static int compressed_section_fix(Elf *elf, Elf_Scn *scn, GElf_Shdr *sh)
{
int expected = gelf_getclass(elf) == ELFCLASS32 ? 4 : 8;
if (!(sh->sh_flags & SHF_COMPRESSED))
return 0;
if (sh->sh_addralign == expected)
return 0;
pr_debug2(" - fixing wrong alignment sh_addralign %u, expected %u\n",
sh->sh_addralign, expected);
sh->sh_addralign = expected;
if (gelf_update_shdr(scn, sh) == 0) {
pr_err("FAILED cannot update section header: %s\n",
elf_errmsg(-1));
return -1;
}
return 0;
}
static int elf_collect(struct object *obj)
{
Elf_Scn *scn = NULL;
size_t shdrstrndx;
int idx = 0;
Elf *elf;
int fd;
fd = open(obj->path, O_RDWR, 0666);
if (fd == -1) {
pr_err("FAILED cannot open %s: %s\n",
obj->path, strerror(errno));
return -1;
}
elf_version(EV_CURRENT);
elf = elf_begin(fd, ELF_C_RDWR_MMAP, NULL);
if (!elf) {
close(fd);
pr_err("FAILED cannot create ELF descriptor: %s\n",
elf_errmsg(-1));
return -1;
}
obj->efile.fd = fd;
obj->efile.elf = elf;
elf_flagelf(elf, ELF_C_SET, ELF_F_LAYOUT);
if (elf_getshdrstrndx(elf, &shdrstrndx) != 0) {
pr_err("FAILED cannot get shdr str ndx\n");
return -1;
}
/*
* Scan all the elf sections and look for save data
* from .BTF_ids section and symbols.
*/
while ((scn = elf_nextscn(elf, scn)) != NULL) {
Elf_Data *data;
GElf_Shdr sh;
char *name;
idx++;
if (gelf_getshdr(scn, &sh) != &sh) {
pr_err("FAILED get section(%d) header\n", idx);
return -1;
}
name = elf_strptr(elf, shdrstrndx, sh.sh_name);
if (!name) {
pr_err("FAILED get section(%d) name\n", idx);
return -1;
}
data = elf_getdata(scn, 0);
if (!data) {
pr_err("FAILED to get section(%d) data from %s\n",
idx, name);
return -1;
}
pr_debug2("section(%d) %s, size %ld, link %d, flags %lx, type=%d\n",
idx, name, (unsigned long) data->d_size,
(int) sh.sh_link, (unsigned long) sh.sh_flags,
(int) sh.sh_type);
if (sh.sh_type == SHT_SYMTAB) {
obj->efile.symbols = data;
obj->efile.symbols_shndx = idx;
obj->efile.strtabidx = sh.sh_link;
} else if (!strcmp(name, BTF_IDS_SECTION)) {
obj->efile.idlist = data;
obj->efile.idlist_shndx = idx;
obj->efile.idlist_addr = sh.sh_addr;
}
if (compressed_section_fix(elf, scn, &sh))
return -1;
}
return 0;
}
static int symbols_collect(struct object *obj)
{
Elf_Scn *scn = NULL;
int n, i;
GElf_Shdr sh;
char *name;
scn = elf_getscn(obj->efile.elf, obj->efile.symbols_shndx);
if (!scn)
return -1;
if (gelf_getshdr(scn, &sh) != &sh)
return -1;
n = sh.sh_size / sh.sh_entsize;
/*
* Scan symbols and look for the ones starting with
* __BTF_ID__* over .BTF_ids section.
*/
for (i = 0; i < n; i++) {
char *prefix;
struct btf_id *id;
GElf_Sym sym;
if (!gelf_getsym(obj->efile.symbols, i, &sym))
return -1;
if (sym.st_shndx != obj->efile.idlist_shndx)
continue;
name = elf_strptr(obj->efile.elf, obj->efile.strtabidx,
sym.st_name);
if (!is_btf_id(name))
continue;
/*
* __BTF_ID__TYPE__vfs_truncate__0
* prefix = ^
*/
prefix = name + sizeof(BTF_ID) - 1;
/* struct */
if (!strncmp(prefix, BTF_STRUCT, sizeof(BTF_STRUCT) - 1)) {
obj->nr_structs++;
id = add_symbol(&obj->structs, prefix, sizeof(BTF_STRUCT) - 1);
/* union */
} else if (!strncmp(prefix, BTF_UNION, sizeof(BTF_UNION) - 1)) {
obj->nr_unions++;
id = add_symbol(&obj->unions, prefix, sizeof(BTF_UNION) - 1);
/* typedef */
} else if (!strncmp(prefix, BTF_TYPEDEF, sizeof(BTF_TYPEDEF) - 1)) {
obj->nr_typedefs++;
id = add_symbol(&obj->typedefs, prefix, sizeof(BTF_TYPEDEF) - 1);
/* func */
} else if (!strncmp(prefix, BTF_FUNC, sizeof(BTF_FUNC) - 1)) {
obj->nr_funcs++;
id = add_symbol(&obj->funcs, prefix, sizeof(BTF_FUNC) - 1);
/* set8 */
} else if (!strncmp(prefix, BTF_SET8, sizeof(BTF_SET8) - 1)) {
id = add_set(obj, prefix, true);
			/*
			 * A SET8 object's element count is encoded in the
			 * symbol's size, which also covers the leading 'cnt'
			 * word, hence the - 1.
			 */
if (id) {
id->cnt = sym.st_size / sizeof(uint64_t) - 1;
id->is_set8 = true;
}
/* set */
} else if (!strncmp(prefix, BTF_SET, sizeof(BTF_SET) - 1)) {
id = add_set(obj, prefix, false);
			/*
			 * A SET object's element count is encoded in the
			 * symbol's size, which also covers the leading 'cnt'
			 * word, hence the - 1.
			 */
if (id) {
id->cnt = sym.st_size / sizeof(int) - 1;
id->is_set = true;
}
} else {
pr_err("FAILED unsupported prefix %s\n", prefix);
return -1;
}
if (!id)
return -ENOMEM;
if (id->addr_cnt >= ADDR_CNT) {
pr_err("FAILED symbol %s crossed the number of allowed lists\n",
id->name);
return -1;
}
id->addr[id->addr_cnt++] = sym.st_value;
}
return 0;
}
static int symbols_resolve(struct object *obj)
{
int nr_typedefs = obj->nr_typedefs;
int nr_structs = obj->nr_structs;
int nr_unions = obj->nr_unions;
int nr_funcs = obj->nr_funcs;
struct btf *base_btf = NULL;
int err, type_id;
struct btf *btf;
__u32 nr_types;
if (obj->base_btf_path) {
base_btf = btf__parse(obj->base_btf_path, NULL);
err = libbpf_get_error(base_btf);
if (err) {
pr_err("FAILED: load base BTF from %s: %s\n",
obj->base_btf_path, strerror(-err));
return -1;
}
}
btf = btf__parse_split(obj->btf ?: obj->path, base_btf);
err = libbpf_get_error(btf);
if (err) {
pr_err("FAILED: load BTF from %s: %s\n",
obj->btf ?: obj->path, strerror(-err));
goto out;
}
err = -1;
nr_types = btf__type_cnt(btf);
/*
* Iterate all the BTF types and search for collected symbol IDs.
*/
for (type_id = 1; type_id < nr_types; type_id++) {
const struct btf_type *type;
struct rb_root *root;
struct btf_id *id;
const char *str;
int *nr;
type = btf__type_by_id(btf, type_id);
if (!type) {
pr_err("FAILED: malformed BTF, can't resolve type for ID %d\n",
type_id);
goto out;
}
if (btf_is_func(type) && nr_funcs) {
nr = &nr_funcs;
root = &obj->funcs;
} else if (btf_is_struct(type) && nr_structs) {
nr = &nr_structs;
root = &obj->structs;
} else if (btf_is_union(type) && nr_unions) {
nr = &nr_unions;
root = &obj->unions;
} else if (btf_is_typedef(type) && nr_typedefs) {
nr = &nr_typedefs;
root = &obj->typedefs;
} else
continue;
str = btf__name_by_offset(btf, type->name_off);
if (!str) {
pr_err("FAILED: malformed BTF, can't resolve name for ID %d\n",
type_id);
goto out;
}
id = btf_id__find(root, str);
if (id) {
if (id->id) {
pr_info("WARN: multiple IDs found for '%s': %d, %d - using %d\n",
str, id->id, type_id, id->id);
} else {
id->id = type_id;
(*nr)--;
}
}
}
err = 0;
out:
btf__free(base_btf);
btf__free(btf);
return err;
}
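/* Write the resolved BTF ID into each 4-byte slot the symbol occupies
 * within the .BTF_ids section data.
 */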
static int id_patch(struct object *obj, struct btf_id *id)
{
Elf_Data *data = obj->efile.idlist;
int *ptr = data->d_buf;
int i;
/* For set, set8, id->id may be 0 */
if (!id->id && !id->is_set && !id->is_set8)
pr_err("WARN: resolve_btfids: unresolved symbol %s\n", id->name);
for (i = 0; i < id->addr_cnt; i++) {
unsigned long addr = id->addr[i];
unsigned long idx = addr - obj->efile.idlist_addr;
pr_debug("patching addr %5lu: ID %7d [%s]\n",
idx, id->id, id->name);
if (idx >= data->d_size) {
pr_err("FAILED patching index %lu out of bounds %lu\n",
idx, data->d_size);
return -1;
}
idx = idx / sizeof(int);
ptr[idx] = id->id;
}
return 0;
}
static int __symbols_patch(struct object *obj, struct rb_root *root)
{
struct rb_node *next;
struct btf_id *id;
next = rb_first(root);
while (next) {
id = rb_entry(next, struct btf_id, rb_node);
if (id_patch(obj, id))
return -1;
next = rb_next(next);
}
return 0;
}
static int cmp_id(const void *pa, const void *pb)
{
const int *a = pa, *b = pb;
return *a - *b;
}
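/* Sort each set's ID list in place (the element count is stored in the
 * leading word(s)) so that consumers can binary-search it.
 */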
static int sets_patch(struct object *obj)
{
Elf_Data *data = obj->efile.idlist;
int *ptr = data->d_buf;
struct rb_node *next;
next = rb_first(&obj->sets);
while (next) {
unsigned long addr, idx;
struct btf_id *id;
int *base;
int cnt;
id = rb_entry(next, struct btf_id, rb_node);
addr = id->addr[0];
idx = addr - obj->efile.idlist_addr;
/* sets are unique */
if (id->addr_cnt != 1) {
pr_err("FAILED malformed data for set '%s'\n",
id->name);
return -1;
}
idx = idx / sizeof(int);
base = &ptr[idx] + (id->is_set8 ? 2 : 1);
cnt = ptr[idx];
pr_debug("sorting addr %5lu: cnt %6d [%s]\n",
(idx + 1) * sizeof(int), cnt, id->name);
qsort(base, cnt, id->is_set8 ? sizeof(uint64_t) : sizeof(int), cmp_id);
next = rb_next(next);
}
return 0;
}
static int symbols_patch(struct object *obj)
{
int err;
if (__symbols_patch(obj, &obj->structs) ||
__symbols_patch(obj, &obj->unions) ||
__symbols_patch(obj, &obj->typedefs) ||
__symbols_patch(obj, &obj->funcs) ||
__symbols_patch(obj, &obj->sets))
return -1;
if (sets_patch(obj))
return -1;
/* Set type to ensure endian translation occurs. */
obj->efile.idlist->d_type = ELF_T_WORD;
elf_flagdata(obj->efile.idlist, ELF_C_SET, ELF_F_DIRTY);
err = elf_update(obj->efile.elf, ELF_C_WRITE);
if (err < 0) {
pr_err("FAILED elf_update(WRITE): %s\n",
elf_errmsg(-1));
}
pr_debug("update %s for %s\n",
err >= 0 ? "ok" : "failed", obj->path);
return err < 0 ? -1 : 0;
}
static const char * const resolve_btfids_usage[] = {
"resolve_btfids [<options>] <ELF object>",
NULL
};
int main(int argc, const char **argv)
{
struct object obj = {
.efile = {
.idlist_shndx = -1,
.symbols_shndx = -1,
},
.structs = RB_ROOT,
.unions = RB_ROOT,
.typedefs = RB_ROOT,
.funcs = RB_ROOT,
.sets = RB_ROOT,
};
struct option btfid_options[] = {
OPT_INCR('v', "verbose", &verbose,
"be more verbose (show errors, etc)"),
OPT_STRING(0, "btf", &obj.btf, "BTF data",
"BTF data"),
OPT_STRING('b', "btf_base", &obj.base_btf_path, "file",
"path of file providing base BTF"),
OPT_END()
};
int err = -1;
argc = parse_options(argc, argv, btfid_options, resolve_btfids_usage,
PARSE_OPT_STOP_AT_NON_OPTION);
if (argc != 1)
usage_with_options(resolve_btfids_usage, btfid_options);
obj.path = argv[0];
if (elf_collect(&obj))
goto out;
/*
* We did not find .BTF_ids section or symbols section,
* nothing to do..
*/
if (obj.efile.idlist_shndx == -1 ||
obj.efile.symbols_shndx == -1) {
pr_debug("Cannot find .BTF_ids or symbols sections, nothing to do\n");
err = 0;
goto out;
}
if (symbols_collect(&obj))
goto out;
if (symbols_resolve(&obj))
goto out;
if (symbols_patch(&obj))
goto out;
err = 0;
out:
if (obj.efile.elf) {
elf_end(obj.efile.elf);
close(obj.efile.fd);
}
return err;
}
| linux-master | tools/bpf/resolve_btfids/main.c |
// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
/* Copyright (C) 2017-2018 Netronome Systems, Inc. */
#ifndef _GNU_SOURCE
#define _GNU_SOURCE
#endif
#include <ctype.h>
#include <errno.h>
#include <fcntl.h>
#include <ftw.h>
#include <libgen.h>
#include <mntent.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <net/if.h>
#include <sys/mount.h>
#include <sys/resource.h>
#include <sys/stat.h>
#include <sys/vfs.h>
#include <linux/filter.h>
#include <linux/limits.h>
#include <linux/magic.h>
#include <linux/unistd.h>
#include <bpf/bpf.h>
#include <bpf/hashmap.h>
#include <bpf/libbpf.h> /* libbpf_num_possible_cpus */
#include <bpf/btf.h>
#include "main.h"
#ifndef BPF_FS_MAGIC
#define BPF_FS_MAGIC 0xcafe4a11
#endif
void p_err(const char *fmt, ...)
{
va_list ap;
va_start(ap, fmt);
if (json_output) {
jsonw_start_object(json_wtr);
jsonw_name(json_wtr, "error");
jsonw_vprintf_enquote(json_wtr, fmt, ap);
jsonw_end_object(json_wtr);
} else {
fprintf(stderr, "Error: ");
vfprintf(stderr, fmt, ap);
fprintf(stderr, "\n");
}
va_end(ap);
}
void p_info(const char *fmt, ...)
{
va_list ap;
if (json_output)
return;
va_start(ap, fmt);
vfprintf(stderr, fmt, ap);
fprintf(stderr, "\n");
va_end(ap);
}
static bool is_bpffs(const char *path)
{
struct statfs st_fs;
if (statfs(path, &st_fs) < 0)
return false;
return (unsigned long)st_fs.f_type == BPF_FS_MAGIC;
}
/* Probe whether kernel switched from memlock-based (RLIMIT_MEMLOCK) to
* memcg-based memory accounting for BPF maps and programs. This was done in
* commit 97306be45fbe ("Merge branch 'switch to memcg-based memory
* accounting'"), in Linux 5.11.
*
* Libbpf also offers to probe for memcg-based accounting vs rlimit, but does
* so by checking for the availability of a given BPF helper and this has
* failed on some kernels with backports in the past, see commit 6b4384ff1088
* ("Revert "bpftool: Use libbpf 1.0 API mode instead of RLIMIT_MEMLOCK"").
* Instead, we can probe by lowering the process-based rlimit to 0, trying to
* load a BPF object, and resetting the rlimit. If the load succeeds then
* memcg-based accounting is supported.
*
* This would be too dangerous to do in the library, because multithreaded
* applications might attempt to load items while the rlimit is at 0. Given
* that bpftool is single-threaded, this is fine to do here.
*/
static bool known_to_need_rlimit(void)
{
struct rlimit rlim_init, rlim_cur_zero = {};
struct bpf_insn insns[] = {
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
};
size_t insn_cnt = ARRAY_SIZE(insns);
union bpf_attr attr;
int prog_fd, err;
memset(&attr, 0, sizeof(attr));
attr.prog_type = BPF_PROG_TYPE_SOCKET_FILTER;
attr.insns = ptr_to_u64(insns);
attr.insn_cnt = insn_cnt;
attr.license = ptr_to_u64("GPL");
if (getrlimit(RLIMIT_MEMLOCK, &rlim_init))
return false;
/* Drop the soft limit to zero. We maintain the hard limit to its
* current value, because lowering it would be a permanent operation
* for unprivileged users.
*/
rlim_cur_zero.rlim_max = rlim_init.rlim_max;
if (setrlimit(RLIMIT_MEMLOCK, &rlim_cur_zero))
return false;
/* Do not use bpf_prog_load() from libbpf here, because it calls
* bump_rlimit_memlock(), interfering with the current probe.
*/
prog_fd = syscall(__NR_bpf, BPF_PROG_LOAD, &attr, sizeof(attr));
err = errno;
/* reset soft rlimit to its initial value */
setrlimit(RLIMIT_MEMLOCK, &rlim_init);
if (prog_fd < 0)
return err == EPERM;
close(prog_fd);
return false;
}
void set_max_rlimit(void)
{
struct rlimit rinf = { RLIM_INFINITY, RLIM_INFINITY };
if (known_to_need_rlimit())
setrlimit(RLIMIT_MEMLOCK, &rinf);
}
static int
mnt_fs(const char *target, const char *type, char *buff, size_t bufflen)
{
bool bind_done = false;
while (mount("", target, "none", MS_PRIVATE | MS_REC, NULL)) {
if (errno != EINVAL || bind_done) {
snprintf(buff, bufflen,
"mount --make-private %s failed: %s",
target, strerror(errno));
return -1;
}
if (mount(target, target, "none", MS_BIND, NULL)) {
snprintf(buff, bufflen,
"mount --bind %s %s failed: %s",
target, target, strerror(errno));
return -1;
}
bind_done = true;
}
if (mount(type, target, type, 0, "mode=0700")) {
snprintf(buff, bufflen, "mount -t %s %s %s failed: %s",
type, type, target, strerror(errno));
return -1;
}
return 0;
}
int mount_tracefs(const char *target)
{
char err_str[ERR_MAX_LEN];
int err;
err = mnt_fs(target, "tracefs", err_str, ERR_MAX_LEN);
if (err) {
err_str[ERR_MAX_LEN - 1] = '\0';
p_err("can't mount tracefs: %s", err_str);
}
return err;
}
int open_obj_pinned(const char *path, bool quiet)
{
char *pname;
int fd = -1;
pname = strdup(path);
if (!pname) {
if (!quiet)
p_err("mem alloc failed");
goto out_ret;
}
fd = bpf_obj_get(pname);
if (fd < 0) {
if (!quiet)
p_err("bpf obj get (%s): %s", pname,
errno == EACCES && !is_bpffs(dirname(pname)) ?
"directory not in bpf file system (bpffs)" :
strerror(errno));
goto out_free;
}
out_free:
free(pname);
out_ret:
return fd;
}
int open_obj_pinned_any(const char *path, enum bpf_obj_type exp_type)
{
enum bpf_obj_type type;
int fd;
fd = open_obj_pinned(path, false);
if (fd < 0)
return -1;
type = get_fd_type(fd);
if (type < 0) {
close(fd);
return type;
}
if (type != exp_type) {
p_err("incorrect object type: %s", get_fd_type_name(type));
close(fd);
return -1;
}
return fd;
}
int mount_bpffs_for_pin(const char *name, bool is_dir)
{
char err_str[ERR_MAX_LEN];
char *file;
char *dir;
int err = 0;
if (is_dir && is_bpffs(name))
return err;
file = malloc(strlen(name) + 1);
if (!file) {
p_err("mem alloc failed");
return -1;
}
strcpy(file, name);
dir = dirname(file);
if (is_bpffs(dir))
/* nothing to do if already mounted */
goto out_free;
if (block_mount) {
p_err("no BPF file system found, not mounting it due to --nomount option");
err = -1;
goto out_free;
}
err = mnt_fs(dir, "bpf", err_str, ERR_MAX_LEN);
if (err) {
err_str[ERR_MAX_LEN - 1] = '\0';
p_err("can't mount BPF file system to pin the object (%s): %s",
name, err_str);
}
out_free:
free(file);
return err;
}
int do_pin_fd(int fd, const char *name)
{
int err;
err = mount_bpffs_for_pin(name, false);
if (err)
return err;
err = bpf_obj_pin(fd, name);
if (err)
p_err("can't pin the object (%s): %s", name, strerror(errno));
return err;
}
int do_pin_any(int argc, char **argv, int (*get_fd)(int *, char ***))
{
int err;
int fd;
if (!REQ_ARGS(3))
return -EINVAL;
fd = get_fd(&argc, &argv);
if (fd < 0)
return fd;
err = do_pin_fd(fd, *argv);
close(fd);
return err;
}
const char *get_fd_type_name(enum bpf_obj_type type)
{
static const char * const names[] = {
[BPF_OBJ_UNKNOWN] = "unknown",
[BPF_OBJ_PROG] = "prog",
[BPF_OBJ_MAP] = "map",
[BPF_OBJ_LINK] = "link",
};
if (type < 0 || type >= ARRAY_SIZE(names) || !names[type])
return names[BPF_OBJ_UNKNOWN];
return names[type];
}
void get_prog_full_name(const struct bpf_prog_info *prog_info, int prog_fd,
char *name_buff, size_t buff_len)
{
const char *prog_name = prog_info->name;
const struct btf_type *func_type;
	struct bpf_func_info finfo = {}; /* filled by the kernel below, must not be const */
struct bpf_prog_info info = {};
__u32 info_len = sizeof(info);
struct btf *prog_btf = NULL;
if (buff_len <= BPF_OBJ_NAME_LEN ||
strlen(prog_info->name) < BPF_OBJ_NAME_LEN - 1)
goto copy_name;
if (!prog_info->btf_id || prog_info->nr_func_info == 0)
goto copy_name;
info.nr_func_info = 1;
info.func_info_rec_size = prog_info->func_info_rec_size;
if (info.func_info_rec_size > sizeof(finfo))
info.func_info_rec_size = sizeof(finfo);
info.func_info = ptr_to_u64(&finfo);
if (bpf_prog_get_info_by_fd(prog_fd, &info, &info_len))
goto copy_name;
prog_btf = btf__load_from_kernel_by_id(info.btf_id);
if (!prog_btf)
goto copy_name;
func_type = btf__type_by_id(prog_btf, finfo.type_id);
if (!func_type || !btf_is_func(func_type))
goto copy_name;
prog_name = btf__name_by_offset(prog_btf, func_type->name_off);
copy_name:
snprintf(name_buff, buff_len, "%s", prog_name);
if (prog_btf)
btf__free(prog_btf);
}
int get_fd_type(int fd)
{
char path[PATH_MAX];
char buf[512];
ssize_t n;
snprintf(path, sizeof(path), "/proc/self/fd/%d", fd);
	n = readlink(path, buf, sizeof(buf) - 1);
	if (n < 0) {
		p_err("can't read link type: %s", strerror(errno));
		return -1;
	}
	if ((size_t)n == sizeof(buf) - 1) {
		p_err("can't read link type: path too long!");
		return -1;
	}
	buf[n] = '\0';
if (strstr(buf, "bpf-map"))
return BPF_OBJ_MAP;
else if (strstr(buf, "bpf-prog"))
return BPF_OBJ_PROG;
else if (strstr(buf, "bpf-link"))
return BPF_OBJ_LINK;
return BPF_OBJ_UNKNOWN;
}
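/* Return a malloc'ed copy of the value following "<key>\t" in
 * /proc/self/fdinfo/<fd>, trailing newline stripped; caller must free it.
 * Returns NULL if the key is not found.
 */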
char *get_fdinfo(int fd, const char *key)
{
char path[PATH_MAX];
char *line = NULL;
size_t line_n = 0;
ssize_t n;
FILE *fdi;
snprintf(path, sizeof(path), "/proc/self/fdinfo/%d", fd);
fdi = fopen(path, "r");
if (!fdi)
return NULL;
while ((n = getline(&line, &line_n, fdi)) > 0) {
char *value;
int len;
if (!strstr(line, key))
continue;
fclose(fdi);
value = strchr(line, '\t');
if (!value || !value[1]) {
free(line);
return NULL;
}
value++;
len = strlen(value);
memmove(line, value, len);
line[len - 1] = '\0';
return line;
}
free(line);
fclose(fdi);
return NULL;
}
void print_data_json(uint8_t *data, size_t len)
{
unsigned int i;
jsonw_start_array(json_wtr);
for (i = 0; i < len; i++)
jsonw_printf(json_wtr, "%d", data[i]);
jsonw_end_array(json_wtr);
}
void print_hex_data_json(uint8_t *data, size_t len)
{
unsigned int i;
jsonw_start_array(json_wtr);
for (i = 0; i < len; i++)
jsonw_printf(json_wtr, "\"0x%02hhx\"", data[i]);
jsonw_end_array(json_wtr);
}
/* extra params for nftw cb */
static struct hashmap *build_fn_table;
static enum bpf_obj_type build_fn_type;
static int do_build_table_cb(const char *fpath, const struct stat *sb,
int typeflag, struct FTW *ftwbuf)
{
struct bpf_prog_info pinned_info;
__u32 len = sizeof(pinned_info);
enum bpf_obj_type objtype;
int fd, err = 0;
char *path;
if (typeflag != FTW_F)
goto out_ret;
fd = open_obj_pinned(fpath, true);
if (fd < 0)
goto out_ret;
objtype = get_fd_type(fd);
if (objtype != build_fn_type)
goto out_close;
memset(&pinned_info, 0, sizeof(pinned_info));
if (bpf_prog_get_info_by_fd(fd, &pinned_info, &len))
goto out_close;
path = strdup(fpath);
if (!path) {
err = -1;
goto out_close;
}
err = hashmap__append(build_fn_table, pinned_info.id, path);
if (err) {
p_err("failed to append entry to hashmap for ID %u, path '%s': %s",
pinned_info.id, path, strerror(errno));
free(path);
goto out_close;
}
out_close:
close(fd);
out_ret:
return err;
}
int build_pinned_obj_table(struct hashmap *tab,
enum bpf_obj_type type)
{
struct mntent *mntent = NULL;
FILE *mntfile = NULL;
int flags = FTW_PHYS;
int nopenfd = 16;
int err = 0;
mntfile = setmntent("/proc/mounts", "r");
if (!mntfile)
return -1;
build_fn_table = tab;
build_fn_type = type;
while ((mntent = getmntent(mntfile))) {
char *path = mntent->mnt_dir;
if (strncmp(mntent->mnt_type, "bpf", 3) != 0)
continue;
err = nftw(path, do_build_table_cb, nopenfd, flags);
if (err)
break;
}
fclose(mntfile);
return err;
}
void delete_pinned_obj_table(struct hashmap *map)
{
struct hashmap_entry *entry;
size_t bkt;
if (!map)
return;
hashmap__for_each_entry(map, entry, bkt)
free(entry->pvalue);
hashmap__free(map);
}
unsigned int get_page_size(void)
{
static int result;
if (!result)
result = getpagesize();
return result;
}
unsigned int get_possible_cpus(void)
{
int cpus = libbpf_num_possible_cpus();
if (cpus < 0) {
p_err("Can't get # of possible cpus: %s", strerror(-cpus));
exit(-1);
}
return cpus;
}
static char *
ifindex_to_name_ns(__u32 ifindex, __u32 ns_dev, __u32 ns_ino, char *buf)
{
struct stat st;
int err;
err = stat("/proc/self/ns/net", &st);
if (err) {
p_err("Can't stat /proc/self: %s", strerror(errno));
return NULL;
}
if (st.st_dev != ns_dev || st.st_ino != ns_ino)
return NULL;
return if_indextoname(ifindex, buf);
}
static int read_sysfs_hex_int(char *path)
{
char vendor_id_buf[8];
int len;
int fd;
fd = open(path, O_RDONLY);
if (fd < 0) {
p_err("Can't open %s: %s", path, strerror(errno));
return -1;
}
len = read(fd, vendor_id_buf, sizeof(vendor_id_buf));
close(fd);
if (len < 0) {
p_err("Can't read %s: %s", path, strerror(errno));
return -1;
}
if (len >= (int)sizeof(vendor_id_buf)) {
p_err("Value in %s too long", path);
return -1;
}
vendor_id_buf[len] = 0;
return strtol(vendor_id_buf, NULL, 0);
}
static int read_sysfs_netdev_hex_int(char *devname, const char *entry_name)
{
char full_path[64];
snprintf(full_path, sizeof(full_path), "/sys/class/net/%s/device/%s",
devname, entry_name);
return read_sysfs_hex_int(full_path);
}
const char *
ifindex_to_arch(__u32 ifindex, __u64 ns_dev, __u64 ns_ino, const char **opt)
{
__maybe_unused int device_id;
char devname[IF_NAMESIZE];
int vendor_id;
if (!ifindex_to_name_ns(ifindex, ns_dev, ns_ino, devname)) {
p_err("Can't get net device name for ifindex %d: %s", ifindex,
strerror(errno));
return NULL;
}
vendor_id = read_sysfs_netdev_hex_int(devname, "vendor");
if (vendor_id < 0) {
p_err("Can't get device vendor id for %s", devname);
return NULL;
}
switch (vendor_id) {
#ifdef HAVE_LIBBFD_SUPPORT
case 0x19ee:
device_id = read_sysfs_netdev_hex_int(devname, "device");
if (device_id != 0x4000 &&
device_id != 0x6000 &&
device_id != 0x6003)
p_info("Unknown NFP device ID, assuming it is NFP-6xxx arch");
*opt = "ctx4";
return "NFP-6xxx";
#endif /* HAVE_LIBBFD_SUPPORT */
/* No NFP support in LLVM, we have no valid triple to return. */
default:
p_err("Can't get arch name for device vendor id 0x%04x",
vendor_id);
return NULL;
}
}
void print_dev_plain(__u32 ifindex, __u64 ns_dev, __u64 ns_inode)
{
char name[IF_NAMESIZE];
if (!ifindex)
return;
printf(" offloaded_to ");
if (ifindex_to_name_ns(ifindex, ns_dev, ns_inode, name))
printf("%s", name);
else
printf("ifindex %u ns_dev %llu ns_ino %llu",
ifindex, ns_dev, ns_inode);
}
void print_dev_json(__u32 ifindex, __u64 ns_dev, __u64 ns_inode)
{
char name[IF_NAMESIZE];
if (!ifindex)
return;
jsonw_name(json_wtr, "dev");
jsonw_start_object(json_wtr);
jsonw_uint_field(json_wtr, "ifindex", ifindex);
jsonw_uint_field(json_wtr, "ns_dev", ns_dev);
jsonw_uint_field(json_wtr, "ns_inode", ns_inode);
if (ifindex_to_name_ns(ifindex, ns_dev, ns_inode, name))
jsonw_string_field(json_wtr, "ifname", name);
jsonw_end_object(json_wtr);
}
int parse_u32_arg(int *argc, char ***argv, __u32 *val, const char *what)
{
char *endptr;
NEXT_ARGP();
if (*val) {
p_err("%s already specified", what);
return -1;
}
*val = strtoul(**argv, &endptr, 0);
if (*endptr) {
p_err("can't parse %s as %s", **argv, what);
return -1;
}
NEXT_ARGP();
return 0;
}
int __printf(2, 0)
print_all_levels(__maybe_unused enum libbpf_print_level level,
const char *format, va_list args)
{
return vfprintf(stderr, format, args);
}
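/* Walk all loaded programs and collect fds for those whose tag or full
 * name matches; *fds must have room for one entry and is realloc'ed as
 * needed. Returns the number of fds collected, -1 on error.
 */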
static int prog_fd_by_nametag(void *nametag, int **fds, bool tag)
{
char prog_name[MAX_PROG_FULL_NAME];
unsigned int id = 0;
int fd, nb_fds = 0;
void *tmp;
int err;
while (true) {
struct bpf_prog_info info = {};
__u32 len = sizeof(info);
err = bpf_prog_get_next_id(id, &id);
if (err) {
if (errno != ENOENT) {
p_err("%s", strerror(errno));
goto err_close_fds;
}
return nb_fds;
}
fd = bpf_prog_get_fd_by_id(id);
if (fd < 0) {
p_err("can't get prog by id (%u): %s",
id, strerror(errno));
goto err_close_fds;
}
err = bpf_prog_get_info_by_fd(fd, &info, &len);
if (err) {
p_err("can't get prog info (%u): %s",
id, strerror(errno));
goto err_close_fd;
}
if (tag && memcmp(nametag, info.tag, BPF_TAG_SIZE)) {
close(fd);
continue;
}
if (!tag) {
get_prog_full_name(&info, fd, prog_name,
sizeof(prog_name));
if (strncmp(nametag, prog_name, sizeof(prog_name))) {
close(fd);
continue;
}
}
if (nb_fds > 0) {
tmp = realloc(*fds, (nb_fds + 1) * sizeof(int));
if (!tmp) {
p_err("failed to realloc");
goto err_close_fd;
}
*fds = tmp;
}
(*fds)[nb_fds++] = fd;
}
err_close_fd:
close(fd);
err_close_fds:
while (--nb_fds >= 0)
close((*fds)[nb_fds]);
return -1;
}
int prog_parse_fds(int *argc, char ***argv, int **fds)
{
if (is_prefix(**argv, "id")) {
unsigned int id;
char *endptr;
NEXT_ARGP();
id = strtoul(**argv, &endptr, 0);
if (*endptr) {
p_err("can't parse %s as ID", **argv);
return -1;
}
NEXT_ARGP();
(*fds)[0] = bpf_prog_get_fd_by_id(id);
if ((*fds)[0] < 0) {
p_err("get by id (%u): %s", id, strerror(errno));
return -1;
}
return 1;
} else if (is_prefix(**argv, "tag")) {
unsigned char tag[BPF_TAG_SIZE];
NEXT_ARGP();
if (sscanf(**argv, BPF_TAG_FMT, tag, tag + 1, tag + 2,
tag + 3, tag + 4, tag + 5, tag + 6, tag + 7)
!= BPF_TAG_SIZE) {
p_err("can't parse tag");
return -1;
}
NEXT_ARGP();
return prog_fd_by_nametag(tag, fds, true);
} else if (is_prefix(**argv, "name")) {
char *name;
NEXT_ARGP();
name = **argv;
if (strlen(name) > MAX_PROG_FULL_NAME - 1) {
p_err("can't parse name");
return -1;
}
NEXT_ARGP();
return prog_fd_by_nametag(name, fds, false);
} else if (is_prefix(**argv, "pinned")) {
char *path;
NEXT_ARGP();
path = **argv;
NEXT_ARGP();
(*fds)[0] = open_obj_pinned_any(path, BPF_OBJ_PROG);
if ((*fds)[0] < 0)
return -1;
return 1;
}
p_err("expected 'id', 'tag', 'name' or 'pinned', got: '%s'?", **argv);
return -1;
}
int prog_parse_fd(int *argc, char ***argv)
{
int *fds = NULL;
int nb_fds, fd;
fds = malloc(sizeof(int));
if (!fds) {
p_err("mem alloc failed");
return -1;
}
nb_fds = prog_parse_fds(argc, argv, &fds);
if (nb_fds != 1) {
if (nb_fds > 1) {
p_err("several programs match this handle");
while (nb_fds--)
close(fds[nb_fds]);
}
fd = -1;
goto exit_free;
}
fd = fds[0];
exit_free:
free(fds);
return fd;
}
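/* Collect a file descriptor for every map whose name matches @name,
* growing *fds as needed. Returns the number of fds, or -1 on error.
*/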
static int map_fd_by_name(char *name, int **fds)
{
unsigned int id = 0;
int fd, nb_fds = 0;
void *tmp;
int err;
while (true) {
struct bpf_map_info info = {};
__u32 len = sizeof(info);
err = bpf_map_get_next_id(id, &id);
if (err) {
if (errno != ENOENT) {
p_err("%s", strerror(errno));
goto err_close_fds;
}
return nb_fds;
}
fd = bpf_map_get_fd_by_id(id);
if (fd < 0) {
p_err("can't get map by id (%u): %s",
id, strerror(errno));
goto err_close_fds;
}
err = bpf_map_get_info_by_fd(fd, &info, &len);
if (err) {
p_err("can't get map info (%u): %s",
id, strerror(errno));
goto err_close_fd;
}
if (strncmp(name, info.name, BPF_OBJ_NAME_LEN)) {
close(fd);
continue;
}
if (nb_fds > 0) {
tmp = realloc(*fds, (nb_fds + 1) * sizeof(int));
if (!tmp) {
p_err("failed to realloc");
goto err_close_fd;
}
*fds = tmp;
}
(*fds)[nb_fds++] = fd;
}
err_close_fd:
close(fd);
err_close_fds:
while (--nb_fds >= 0)
close((*fds)[nb_fds]);
return -1;
}
int map_parse_fds(int *argc, char ***argv, int **fds)
{
if (is_prefix(**argv, "id")) {
unsigned int id;
char *endptr;
NEXT_ARGP();
id = strtoul(**argv, &endptr, 0);
if (*endptr) {
p_err("can't parse %s as ID", **argv);
return -1;
}
NEXT_ARGP();
(*fds)[0] = bpf_map_get_fd_by_id(id);
if ((*fds)[0] < 0) {
p_err("get map by id (%u): %s", id, strerror(errno));
return -1;
}
return 1;
} else if (is_prefix(**argv, "name")) {
char *name;
NEXT_ARGP();
name = **argv;
if (strlen(name) > BPF_OBJ_NAME_LEN - 1) {
p_err("can't parse name");
return -1;
}
NEXT_ARGP();
return map_fd_by_name(name, fds);
} else if (is_prefix(**argv, "pinned")) {
char *path;
NEXT_ARGP();
path = **argv;
NEXT_ARGP();
(*fds)[0] = open_obj_pinned_any(path, BPF_OBJ_MAP);
if ((*fds)[0] < 0)
return -1;
return 1;
}
p_err("expected 'id', 'name' or 'pinned', got: '%s'?", **argv);
return -1;
}
int map_parse_fd(int *argc, char ***argv)
{
int *fds = NULL;
int nb_fds, fd;
fds = malloc(sizeof(int));
if (!fds) {
p_err("mem alloc failed");
return -1;
}
nb_fds = map_parse_fds(argc, argv, &fds);
if (nb_fds != 1) {
if (nb_fds > 1) {
p_err("several maps match this handle");
while (nb_fds--)
close(fds[nb_fds]);
}
fd = -1;
goto exit_free;
}
fd = fds[0];
exit_free:
free(fds);
return fd;
}
int map_parse_fd_and_info(int *argc, char ***argv, struct bpf_map_info *info,
__u32 *info_len)
{
int err;
int fd;
fd = map_parse_fd(argc, argv);
if (fd < 0)
return -1;
err = bpf_map_get_info_by_fd(fd, info, info_len);
if (err) {
p_err("can't get map info: %s", strerror(errno));
close(fd);
return err;
}
return fd;
}
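/* Trivial hash/equality callbacks for hashmaps keyed directly by object ID:
* the ID itself serves as the hash value.
*/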
size_t hash_fn_for_key_as_id(long key, void *ctx)
{
return key;
}
bool equal_fn_for_key_as_id(long k1, long k2, void *ctx)
{
return k1 == k2;
}
const char *bpf_attach_type_input_str(enum bpf_attach_type t)
{
switch (t) {
case BPF_CGROUP_INET_INGRESS: return "ingress";
case BPF_CGROUP_INET_EGRESS: return "egress";
case BPF_CGROUP_INET_SOCK_CREATE: return "sock_create";
case BPF_CGROUP_INET_SOCK_RELEASE: return "sock_release";
case BPF_CGROUP_SOCK_OPS: return "sock_ops";
case BPF_CGROUP_DEVICE: return "device";
case BPF_CGROUP_INET4_BIND: return "bind4";
case BPF_CGROUP_INET6_BIND: return "bind6";
case BPF_CGROUP_INET4_CONNECT: return "connect4";
case BPF_CGROUP_INET6_CONNECT: return "connect6";
case BPF_CGROUP_INET4_POST_BIND: return "post_bind4";
case BPF_CGROUP_INET6_POST_BIND: return "post_bind6";
case BPF_CGROUP_INET4_GETPEERNAME: return "getpeername4";
case BPF_CGROUP_INET6_GETPEERNAME: return "getpeername6";
case BPF_CGROUP_INET4_GETSOCKNAME: return "getsockname4";
case BPF_CGROUP_INET6_GETSOCKNAME: return "getsockname6";
case BPF_CGROUP_UDP4_SENDMSG: return "sendmsg4";
case BPF_CGROUP_UDP6_SENDMSG: return "sendmsg6";
case BPF_CGROUP_SYSCTL: return "sysctl";
case BPF_CGROUP_UDP4_RECVMSG: return "recvmsg4";
case BPF_CGROUP_UDP6_RECVMSG: return "recvmsg6";
case BPF_CGROUP_GETSOCKOPT: return "getsockopt";
case BPF_CGROUP_SETSOCKOPT: return "setsockopt";
case BPF_TRACE_RAW_TP: return "raw_tp";
case BPF_TRACE_FENTRY: return "fentry";
case BPF_TRACE_FEXIT: return "fexit";
case BPF_MODIFY_RETURN: return "mod_ret";
case BPF_SK_REUSEPORT_SELECT: return "sk_skb_reuseport_select";
case BPF_SK_REUSEPORT_SELECT_OR_MIGRATE: return "sk_skb_reuseport_select_or_migrate";
default: return libbpf_bpf_attach_type_str(t);
}
}
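/* Join @path and @name into @buf as "path/name". Returns 0 on success,
* -EINVAL if snprintf() fails and -ENAMETOOLONG if the result would not
* fit in @buf_sz bytes. Illustrative example (hypothetical values):
* pathname_concat(buf, sizeof(buf), "/sys/fs/bpf", "prog") stores
* "/sys/fs/bpf/prog" in buf.
*/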
int pathname_concat(char *buf, int buf_sz, const char *path,
const char *name)
{
int len;
len = snprintf(buf, buf_sz, "%s/%s", path, name);
if (len < 0)
return -EINVAL;
if (len >= buf_sz)
return -ENAMETOOLONG;
return 0;
}
| linux-master | tools/bpf/bpftool/common.c |
// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
/* Copyright (C) 2020 Facebook */
#include <errno.h>
#include <linux/err.h>
#include <linux/netfilter.h>
#include <linux/netfilter_arp.h>
#include <linux/perf_event.h>
#include <net/if.h>
#include <stdio.h>
#include <unistd.h>
#include <bpf/bpf.h>
#include <bpf/hashmap.h>
#include "json_writer.h"
#include "main.h"
#include "xlated_dumper.h"
#define PERF_HW_CACHE_LEN 128
static struct hashmap *link_table;
static struct dump_data dd;
static const char *perf_type_name[PERF_TYPE_MAX] = {
[PERF_TYPE_HARDWARE] = "hardware",
[PERF_TYPE_SOFTWARE] = "software",
[PERF_TYPE_TRACEPOINT] = "tracepoint",
[PERF_TYPE_HW_CACHE] = "hw-cache",
[PERF_TYPE_RAW] = "raw",
[PERF_TYPE_BREAKPOINT] = "breakpoint",
};
const char *event_symbols_hw[PERF_COUNT_HW_MAX] = {
[PERF_COUNT_HW_CPU_CYCLES] = "cpu-cycles",
[PERF_COUNT_HW_INSTRUCTIONS] = "instructions",
[PERF_COUNT_HW_CACHE_REFERENCES] = "cache-references",
[PERF_COUNT_HW_CACHE_MISSES] = "cache-misses",
[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = "branch-instructions",
[PERF_COUNT_HW_BRANCH_MISSES] = "branch-misses",
[PERF_COUNT_HW_BUS_CYCLES] = "bus-cycles",
[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = "stalled-cycles-frontend",
[PERF_COUNT_HW_STALLED_CYCLES_BACKEND] = "stalled-cycles-backend",
[PERF_COUNT_HW_REF_CPU_CYCLES] = "ref-cycles",
};
const char *event_symbols_sw[PERF_COUNT_SW_MAX] = {
[PERF_COUNT_SW_CPU_CLOCK] = "cpu-clock",
[PERF_COUNT_SW_TASK_CLOCK] = "task-clock",
[PERF_COUNT_SW_PAGE_FAULTS] = "page-faults",
[PERF_COUNT_SW_CONTEXT_SWITCHES] = "context-switches",
[PERF_COUNT_SW_CPU_MIGRATIONS] = "cpu-migrations",
[PERF_COUNT_SW_PAGE_FAULTS_MIN] = "minor-faults",
[PERF_COUNT_SW_PAGE_FAULTS_MAJ] = "major-faults",
[PERF_COUNT_SW_ALIGNMENT_FAULTS] = "alignment-faults",
[PERF_COUNT_SW_EMULATION_FAULTS] = "emulation-faults",
[PERF_COUNT_SW_DUMMY] = "dummy",
[PERF_COUNT_SW_BPF_OUTPUT] = "bpf-output",
[PERF_COUNT_SW_CGROUP_SWITCHES] = "cgroup-switches",
};
const char *evsel__hw_cache[PERF_COUNT_HW_CACHE_MAX] = {
[PERF_COUNT_HW_CACHE_L1D] = "L1-dcache",
[PERF_COUNT_HW_CACHE_L1I] = "L1-icache",
[PERF_COUNT_HW_CACHE_LL] = "LLC",
[PERF_COUNT_HW_CACHE_DTLB] = "dTLB",
[PERF_COUNT_HW_CACHE_ITLB] = "iTLB",
[PERF_COUNT_HW_CACHE_BPU] = "branch",
[PERF_COUNT_HW_CACHE_NODE] = "node",
};
const char *evsel__hw_cache_op[PERF_COUNT_HW_CACHE_OP_MAX] = {
[PERF_COUNT_HW_CACHE_OP_READ] = "load",
[PERF_COUNT_HW_CACHE_OP_WRITE] = "store",
[PERF_COUNT_HW_CACHE_OP_PREFETCH] = "prefetch",
};
const char *evsel__hw_cache_result[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
[PERF_COUNT_HW_CACHE_RESULT_ACCESS] = "refs",
[PERF_COUNT_HW_CACHE_RESULT_MISS] = "misses",
};
#define perf_event_name(array, id) ({ \
const char *event_str = NULL; \
\
if ((id) < ARRAY_SIZE(array)) \
event_str = array[id]; \
event_str; \
})
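/* Parse a link specifier ("id ID" or "pinned PATH") from the command line
* and return a file descriptor referring to the link, or a negative value
* on error.
*/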
static int link_parse_fd(int *argc, char ***argv)
{
int fd;
if (is_prefix(**argv, "id")) {
unsigned int id;
char *endptr;
NEXT_ARGP();
id = strtoul(**argv, &endptr, 0);
if (*endptr) {
p_err("can't parse %s as ID", **argv);
return -1;
}
NEXT_ARGP();
fd = bpf_link_get_fd_by_id(id);
if (fd < 0)
p_err("failed to get link with ID %d: %s", id, strerror(errno));
return fd;
} else if (is_prefix(**argv, "pinned")) {
char *path;
NEXT_ARGP();
path = **argv;
NEXT_ARGP();
return open_obj_pinned_any(path, BPF_OBJ_LINK);
}
p_err("expected 'id' or 'pinned', got: '%s'?", **argv);
return -1;
}
static void
show_link_header_json(struct bpf_link_info *info, json_writer_t *wtr)
{
const char *link_type_str;
jsonw_uint_field(wtr, "id", info->id);
link_type_str = libbpf_bpf_link_type_str(info->type);
if (link_type_str)
jsonw_string_field(wtr, "type", link_type_str);
else
jsonw_uint_field(wtr, "type", info->type);
jsonw_uint_field(json_wtr, "prog_id", info->prog_id);
}
static void show_link_attach_type_json(__u32 attach_type, json_writer_t *wtr)
{
const char *attach_type_str;
attach_type_str = libbpf_bpf_attach_type_str(attach_type);
if (attach_type_str)
jsonw_string_field(wtr, "attach_type", attach_type_str);
else
jsonw_uint_field(wtr, "attach_type", attach_type);
}
static void show_link_ifindex_json(__u32 ifindex, json_writer_t *wtr)
{
char devname[IF_NAMESIZE] = "(unknown)";
if (ifindex)
if_indextoname(ifindex, devname);
else
snprintf(devname, sizeof(devname), "(detached)");
jsonw_string_field(wtr, "devname", devname);
jsonw_uint_field(wtr, "ifindex", ifindex);
}
static bool is_iter_map_target(const char *target_name)
{
return strcmp(target_name, "bpf_map_elem") == 0 ||
strcmp(target_name, "bpf_sk_storage_map") == 0;
}
static bool is_iter_cgroup_target(const char *target_name)
{
return strcmp(target_name, "cgroup") == 0;
}
static const char *cgroup_order_string(__u32 order)
{
switch (order) {
case BPF_CGROUP_ITER_ORDER_UNSPEC:
return "order_unspec";
case BPF_CGROUP_ITER_SELF_ONLY:
return "self_only";
case BPF_CGROUP_ITER_DESCENDANTS_PRE:
return "descendants_pre";
case BPF_CGROUP_ITER_DESCENDANTS_POST:
return "descendants_post";
case BPF_CGROUP_ITER_ANCESTORS_UP:
return "ancestors_up";
default: /* won't happen */
return "unknown";
}
}
static bool is_iter_task_target(const char *target_name)
{
return strcmp(target_name, "task") == 0 ||
strcmp(target_name, "task_file") == 0 ||
strcmp(target_name, "task_vma") == 0;
}
static void show_iter_json(struct bpf_link_info *info, json_writer_t *wtr)
{
const char *target_name = u64_to_ptr(info->iter.target_name);
jsonw_string_field(wtr, "target_name", target_name);
if (is_iter_map_target(target_name))
jsonw_uint_field(wtr, "map_id", info->iter.map.map_id);
else if (is_iter_task_target(target_name)) {
if (info->iter.task.tid)
jsonw_uint_field(wtr, "tid", info->iter.task.tid);
else if (info->iter.task.pid)
jsonw_uint_field(wtr, "pid", info->iter.task.pid);
}
if (is_iter_cgroup_target(target_name)) {
jsonw_lluint_field(wtr, "cgroup_id", info->iter.cgroup.cgroup_id);
jsonw_string_field(wtr, "order",
cgroup_order_string(info->iter.cgroup.order));
}
}
void netfilter_dump_json(const struct bpf_link_info *info, json_writer_t *wtr)
{
jsonw_uint_field(wtr, "pf", info->netfilter.pf);
jsonw_uint_field(wtr, "hook", info->netfilter.hooknum);
jsonw_int_field(wtr, "prio", info->netfilter.priority);
jsonw_uint_field(wtr, "flags", info->netfilter.flags);
}
static int get_prog_info(int prog_id, struct bpf_prog_info *info)
{
__u32 len = sizeof(*info);
int err, prog_fd;
prog_fd = bpf_prog_get_fd_by_id(prog_id);
if (prog_fd < 0)
return prog_fd;
memset(info, 0, sizeof(*info));
err = bpf_prog_get_info_by_fd(prog_fd, info, &len);
if (err)
p_err("can't get prog info: %s", strerror(errno));
close(prog_fd);
return err;
}
static int cmp_u64(const void *A, const void *B)
{
const __u64 *a = A, *b = B;
/* Compare explicitly: returning the raw difference truncates to int and
* can yield the wrong sign for large 64-bit values.
*/
return *a < *b ? -1 : *a > *b;
}
static void
show_kprobe_multi_json(struct bpf_link_info *info, json_writer_t *wtr)
{
__u32 i, j = 0;
__u64 *addrs;
jsonw_bool_field(json_wtr, "retprobe",
info->kprobe_multi.flags & BPF_F_KPROBE_MULTI_RETURN);
jsonw_uint_field(json_wtr, "func_cnt", info->kprobe_multi.count);
jsonw_name(json_wtr, "funcs");
jsonw_start_array(json_wtr);
addrs = u64_to_ptr(info->kprobe_multi.addrs);
qsort(addrs, info->kprobe_multi.count, sizeof(addrs[0]), cmp_u64);
/* Load it once for all. */
if (!dd.sym_count)
kernel_syms_load(&dd);
for (i = 0; i < dd.sym_count; i++) {
if (dd.sym_mapping[i].address != addrs[j])
continue;
jsonw_start_object(json_wtr);
jsonw_uint_field(json_wtr, "addr", dd.sym_mapping[i].address);
jsonw_string_field(json_wtr, "func", dd.sym_mapping[i].name);
/* Print null if it is vmlinux */
if (dd.sym_mapping[i].module[0] == '\0') {
jsonw_name(json_wtr, "module");
jsonw_null(json_wtr);
} else {
jsonw_string_field(json_wtr, "module", dd.sym_mapping[i].module);
}
jsonw_end_object(json_wtr);
/* Stop after the last address has been matched; incrementing before
* the comparison avoids reading past the end of addrs[] on the next
* iteration.
*/
if (++j == info->kprobe_multi.count)
break;
}
jsonw_end_array(json_wtr);
}
static void
show_perf_event_kprobe_json(struct bpf_link_info *info, json_writer_t *wtr)
{
jsonw_bool_field(wtr, "retprobe", info->perf_event.type == BPF_PERF_EVENT_KRETPROBE);
jsonw_uint_field(wtr, "addr", info->perf_event.kprobe.addr);
jsonw_string_field(wtr, "func",
u64_to_ptr(info->perf_event.kprobe.func_name));
jsonw_uint_field(wtr, "offset", info->perf_event.kprobe.offset);
}
static void
show_perf_event_uprobe_json(struct bpf_link_info *info, json_writer_t *wtr)
{
jsonw_bool_field(wtr, "retprobe", info->perf_event.type == BPF_PERF_EVENT_URETPROBE);
jsonw_string_field(wtr, "file",
u64_to_ptr(info->perf_event.uprobe.file_name));
jsonw_uint_field(wtr, "offset", info->perf_event.uprobe.offset);
}
static void
show_perf_event_tracepoint_json(struct bpf_link_info *info, json_writer_t *wtr)
{
jsonw_string_field(wtr, "tracepoint",
u64_to_ptr(info->perf_event.tracepoint.tp_name));
}
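/* Decode a PERF_TYPE_HW_CACHE config value into a human readable string.
* Per the perf ABI, the cache id lives in bits 0-7, the op id in bits 8-15
* and the result id in bits 16-23. Returns a malloc()ed string that the
* caller must free, or NULL on allocation failure.
*/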
static char *perf_config_hw_cache_str(__u64 config)
{
const char *hw_cache, *result, *op;
char *str = malloc(PERF_HW_CACHE_LEN);
if (!str) {
p_err("mem alloc failed");
return NULL;
}
hw_cache = perf_event_name(evsel__hw_cache, config & 0xff);
if (hw_cache)
snprintf(str, PERF_HW_CACHE_LEN, "%s-", hw_cache);
else
snprintf(str, PERF_HW_CACHE_LEN, "%lld-", config & 0xff);
op = perf_event_name(evsel__hw_cache_op, (config >> 8) & 0xff);
if (op)
snprintf(str + strlen(str), PERF_HW_CACHE_LEN - strlen(str),
"%s-", op);
else
snprintf(str + strlen(str), PERF_HW_CACHE_LEN - strlen(str),
"%lld-", (config >> 8) & 0xff);
result = perf_event_name(evsel__hw_cache_result, config >> 16);
if (result)
snprintf(str + strlen(str), PERF_HW_CACHE_LEN - strlen(str),
"%s", result);
else
snprintf(str + strlen(str), PERF_HW_CACHE_LEN - strlen(str),
"%lld", config >> 16);
return str;
}
static const char *perf_config_str(__u32 type, __u64 config)
{
const char *perf_config;
switch (type) {
case PERF_TYPE_HARDWARE:
perf_config = perf_event_name(event_symbols_hw, config);
break;
case PERF_TYPE_SOFTWARE:
perf_config = perf_event_name(event_symbols_sw, config);
break;
case PERF_TYPE_HW_CACHE:
perf_config = perf_config_hw_cache_str(config);
break;
default:
perf_config = NULL;
break;
}
return perf_config;
}
static void
show_perf_event_event_json(struct bpf_link_info *info, json_writer_t *wtr)
{
__u64 config = info->perf_event.event.config;
__u32 type = info->perf_event.event.type;
const char *perf_type, *perf_config;
perf_type = perf_event_name(perf_type_name, type);
if (perf_type)
jsonw_string_field(wtr, "event_type", perf_type);
else
jsonw_uint_field(wtr, "event_type", type);
perf_config = perf_config_str(type, config);
if (perf_config)
jsonw_string_field(wtr, "event_config", perf_config);
else
jsonw_uint_field(wtr, "event_config", config);
if (type == PERF_TYPE_HW_CACHE && perf_config)
free((void *)perf_config);
}
static int show_link_close_json(int fd, struct bpf_link_info *info)
{
struct bpf_prog_info prog_info;
const char *prog_type_str;
int err;
jsonw_start_object(json_wtr);
show_link_header_json(info, json_wtr);
switch (info->type) {
case BPF_LINK_TYPE_RAW_TRACEPOINT:
jsonw_string_field(json_wtr, "tp_name",
u64_to_ptr(info->raw_tracepoint.tp_name));
break;
case BPF_LINK_TYPE_TRACING:
err = get_prog_info(info->prog_id, &prog_info);
if (err)
return err;
prog_type_str = libbpf_bpf_prog_type_str(prog_info.type);
/* libbpf will return NULL for variants unknown to it. */
if (prog_type_str)
jsonw_string_field(json_wtr, "prog_type", prog_type_str);
else
jsonw_uint_field(json_wtr, "prog_type", prog_info.type);
show_link_attach_type_json(info->tracing.attach_type,
json_wtr);
jsonw_uint_field(json_wtr, "target_obj_id", info->tracing.target_obj_id);
jsonw_uint_field(json_wtr, "target_btf_id", info->tracing.target_btf_id);
break;
case BPF_LINK_TYPE_CGROUP:
jsonw_lluint_field(json_wtr, "cgroup_id",
info->cgroup.cgroup_id);
show_link_attach_type_json(info->cgroup.attach_type, json_wtr);
break;
case BPF_LINK_TYPE_ITER:
show_iter_json(info, json_wtr);
break;
case BPF_LINK_TYPE_NETNS:
jsonw_uint_field(json_wtr, "netns_ino",
info->netns.netns_ino);
show_link_attach_type_json(info->netns.attach_type, json_wtr);
break;
case BPF_LINK_TYPE_NETFILTER:
netfilter_dump_json(info, json_wtr);
break;
case BPF_LINK_TYPE_TCX:
show_link_ifindex_json(info->tcx.ifindex, json_wtr);
show_link_attach_type_json(info->tcx.attach_type, json_wtr);
break;
case BPF_LINK_TYPE_XDP:
show_link_ifindex_json(info->xdp.ifindex, json_wtr);
break;
case BPF_LINK_TYPE_STRUCT_OPS:
jsonw_uint_field(json_wtr, "map_id",
info->struct_ops.map_id);
break;
case BPF_LINK_TYPE_KPROBE_MULTI:
show_kprobe_multi_json(info, json_wtr);
break;
case BPF_LINK_TYPE_PERF_EVENT:
switch (info->perf_event.type) {
case BPF_PERF_EVENT_EVENT:
show_perf_event_event_json(info, json_wtr);
break;
case BPF_PERF_EVENT_TRACEPOINT:
show_perf_event_tracepoint_json(info, json_wtr);
break;
case BPF_PERF_EVENT_KPROBE:
case BPF_PERF_EVENT_KRETPROBE:
show_perf_event_kprobe_json(info, json_wtr);
break;
case BPF_PERF_EVENT_UPROBE:
case BPF_PERF_EVENT_URETPROBE:
show_perf_event_uprobe_json(info, json_wtr);
break;
default:
break;
}
break;
default:
break;
}
if (!hashmap__empty(link_table)) {
struct hashmap_entry *entry;
jsonw_name(json_wtr, "pinned");
jsonw_start_array(json_wtr);
hashmap__for_each_key_entry(link_table, entry, info->id)
jsonw_string(json_wtr, entry->pvalue);
jsonw_end_array(json_wtr);
}
emit_obj_refs_json(refs_table, info->id, json_wtr);
jsonw_end_object(json_wtr);
return 0;
}
static void show_link_header_plain(struct bpf_link_info *info)
{
const char *link_type_str;
printf("%u: ", info->id);
link_type_str = libbpf_bpf_link_type_str(info->type);
if (link_type_str)
printf("%s ", link_type_str);
else
printf("type %u ", info->type);
if (info->type == BPF_LINK_TYPE_STRUCT_OPS)
printf("map %u ", info->struct_ops.map_id);
else
printf("prog %u ", info->prog_id);
}
static void show_link_attach_type_plain(__u32 attach_type)
{
const char *attach_type_str;
attach_type_str = libbpf_bpf_attach_type_str(attach_type);
if (attach_type_str)
printf("attach_type %s ", attach_type_str);
else
printf("attach_type %u ", attach_type);
}
static void show_link_ifindex_plain(__u32 ifindex)
{
char devname[IF_NAMESIZE * 2] = "(unknown)";
char tmpname[IF_NAMESIZE];
char *ret = NULL;
if (ifindex)
ret = if_indextoname(ifindex, tmpname);
else
snprintf(devname, sizeof(devname), "(detached)");
if (ret)
snprintf(devname, sizeof(devname), "%s(%d)",
tmpname, ifindex);
printf("ifindex %s ", devname);
}
static void show_iter_plain(struct bpf_link_info *info)
{
const char *target_name = u64_to_ptr(info->iter.target_name);
printf("target_name %s ", target_name);
if (is_iter_map_target(target_name))
printf("map_id %u ", info->iter.map.map_id);
else if (is_iter_task_target(target_name)) {
if (info->iter.task.tid)
printf("tid %u ", info->iter.task.tid);
else if (info->iter.task.pid)
printf("pid %u ", info->iter.task.pid);
}
if (is_iter_cgroup_target(target_name)) {
printf("cgroup_id %llu ", info->iter.cgroup.cgroup_id);
printf("order %s ",
cgroup_order_string(info->iter.cgroup.order));
}
}
static const char * const pf2name[] = {
[NFPROTO_INET] = "inet",
[NFPROTO_IPV4] = "ip",
[NFPROTO_ARP] = "arp",
[NFPROTO_NETDEV] = "netdev",
[NFPROTO_BRIDGE] = "bridge",
[NFPROTO_IPV6] = "ip6",
};
static const char * const inethook2name[] = {
[NF_INET_PRE_ROUTING] = "prerouting",
[NF_INET_LOCAL_IN] = "input",
[NF_INET_FORWARD] = "forward",
[NF_INET_LOCAL_OUT] = "output",
[NF_INET_POST_ROUTING] = "postrouting",
};
static const char * const arphook2name[] = {
[NF_ARP_IN] = "input",
[NF_ARP_OUT] = "output",
};
void netfilter_dump_plain(const struct bpf_link_info *info)
{
const char *hookname = NULL, *pfname = NULL;
unsigned int hook = info->netfilter.hooknum;
unsigned int pf = info->netfilter.pf;
if (pf < ARRAY_SIZE(pf2name))
pfname = pf2name[pf];
switch (pf) {
case NFPROTO_BRIDGE: /* bridge shares numbers with enum nf_inet_hooks */
case NFPROTO_IPV4:
case NFPROTO_IPV6:
case NFPROTO_INET:
if (hook < ARRAY_SIZE(inethook2name))
hookname = inethook2name[hook];
break;
case NFPROTO_ARP:
if (hook < ARRAY_SIZE(arphook2name))
hookname = arphook2name[hook];
break;
default:
break;
}
if (pfname)
printf("\n\t%s", pfname);
else
printf("\n\tpf: %d", pf);
if (hookname)
printf(" %s", hookname);
else
printf(", hook %u,", hook);
printf(" prio %d", info->netfilter.priority);
if (info->netfilter.flags)
printf(" flags 0x%x", info->netfilter.flags);
}
static void show_kprobe_multi_plain(struct bpf_link_info *info)
{
__u32 i, j = 0;
__u64 *addrs;
if (!info->kprobe_multi.count)
return;
if (info->kprobe_multi.flags & BPF_F_KPROBE_MULTI_RETURN)
printf("\n\tkretprobe.multi ");
else
printf("\n\tkprobe.multi ");
printf("func_cnt %u ", info->kprobe_multi.count);
addrs = u64_to_ptr(info->kprobe_multi.addrs);
qsort(addrs, info->kprobe_multi.count, sizeof(addrs[0]), cmp_u64);
/* Load it once for all. */
if (!dd.sym_count)
kernel_syms_load(&dd);
if (!dd.sym_count)
return;
printf("\n\t%-16s %s", "addr", "func [module]");
for (i = 0; i < dd.sym_count; i++) {
if (dd.sym_mapping[i].address != addrs[j])
continue;
printf("\n\t%016lx %s",
dd.sym_mapping[i].address, dd.sym_mapping[i].name);
if (dd.sym_mapping[i].module[0] != '\0')
printf(" [%s] ", dd.sym_mapping[i].module);
else
printf(" ");
if (++j == info->kprobe_multi.count)
break;
}
}
static void show_perf_event_kprobe_plain(struct bpf_link_info *info)
{
const char *buf;
buf = u64_to_ptr(info->perf_event.kprobe.func_name);
if (buf[0] == '\0' && !info->perf_event.kprobe.addr)
return;
if (info->perf_event.type == BPF_PERF_EVENT_KRETPROBE)
printf("\n\tkretprobe ");
else
printf("\n\tkprobe ");
if (info->perf_event.kprobe.addr)
printf("%llx ", info->perf_event.kprobe.addr);
printf("%s", buf);
if (info->perf_event.kprobe.offset)
printf("+%#x", info->perf_event.kprobe.offset);
printf(" ");
}
static void show_perf_event_uprobe_plain(struct bpf_link_info *info)
{
const char *buf;
buf = u64_to_ptr(info->perf_event.uprobe.file_name);
if (buf[0] == '\0')
return;
if (info->perf_event.type == BPF_PERF_EVENT_URETPROBE)
printf("\n\turetprobe ");
else
printf("\n\tuprobe ");
printf("%s+%#x ", buf, info->perf_event.uprobe.offset);
}
static void show_perf_event_tracepoint_plain(struct bpf_link_info *info)
{
const char *buf;
buf = u64_to_ptr(info->perf_event.tracepoint.tp_name);
if (buf[0] == '\0')
return;
printf("\n\ttracepoint %s ", buf);
}
static void show_perf_event_event_plain(struct bpf_link_info *info)
{
__u64 config = info->perf_event.event.config;
__u32 type = info->perf_event.event.type;
const char *perf_type, *perf_config;
printf("\n\tevent ");
perf_type = perf_event_name(perf_type_name, type);
if (perf_type)
printf("%s:", perf_type);
else
printf("%u :", type);
perf_config = perf_config_str(type, config);
if (perf_config)
printf("%s ", perf_config);
else
printf("%llu ", config);
if (type == PERF_TYPE_HW_CACHE && perf_config)
free((void *)perf_config);
}
static int show_link_close_plain(int fd, struct bpf_link_info *info)
{
struct bpf_prog_info prog_info;
const char *prog_type_str;
int err;
show_link_header_plain(info);
switch (info->type) {
case BPF_LINK_TYPE_RAW_TRACEPOINT:
printf("\n\ttp '%s' ",
(const char *)u64_to_ptr(info->raw_tracepoint.tp_name));
break;
case BPF_LINK_TYPE_TRACING:
err = get_prog_info(info->prog_id, &prog_info);
if (err)
return err;
prog_type_str = libbpf_bpf_prog_type_str(prog_info.type);
/* libbpf will return NULL for variants unknown to it. */
if (prog_type_str)
printf("\n\tprog_type %s ", prog_type_str);
else
printf("\n\tprog_type %u ", prog_info.type);
show_link_attach_type_plain(info->tracing.attach_type);
if (info->tracing.target_obj_id || info->tracing.target_btf_id)
printf("\n\ttarget_obj_id %u target_btf_id %u ",
info->tracing.target_obj_id,
info->tracing.target_btf_id);
break;
case BPF_LINK_TYPE_CGROUP:
printf("\n\tcgroup_id %zu ", (size_t)info->cgroup.cgroup_id);
show_link_attach_type_plain(info->cgroup.attach_type);
break;
case BPF_LINK_TYPE_ITER:
show_iter_plain(info);
break;
case BPF_LINK_TYPE_NETNS:
printf("\n\tnetns_ino %u ", info->netns.netns_ino);
show_link_attach_type_plain(info->netns.attach_type);
break;
case BPF_LINK_TYPE_NETFILTER:
netfilter_dump_plain(info);
break;
case BPF_LINK_TYPE_TCX:
printf("\n\t");
show_link_ifindex_plain(info->tcx.ifindex);
show_link_attach_type_plain(info->tcx.attach_type);
break;
case BPF_LINK_TYPE_XDP:
printf("\n\t");
show_link_ifindex_plain(info->xdp.ifindex);
break;
case BPF_LINK_TYPE_KPROBE_MULTI:
show_kprobe_multi_plain(info);
break;
case BPF_LINK_TYPE_PERF_EVENT:
switch (info->perf_event.type) {
case BPF_PERF_EVENT_EVENT:
show_perf_event_event_plain(info);
break;
case BPF_PERF_EVENT_TRACEPOINT:
show_perf_event_tracepoint_plain(info);
break;
case BPF_PERF_EVENT_KPROBE:
case BPF_PERF_EVENT_KRETPROBE:
show_perf_event_kprobe_plain(info);
break;
case BPF_PERF_EVENT_UPROBE:
case BPF_PERF_EVENT_URETPROBE:
show_perf_event_uprobe_plain(info);
break;
default:
break;
}
break;
default:
break;
}
if (!hashmap__empty(link_table)) {
struct hashmap_entry *entry;
hashmap__for_each_key_entry(link_table, entry, info->id)
printf("\n\tpinned %s", (char *)entry->pvalue);
}
emit_obj_refs_plain(refs_table, info->id, "\n\tpids ");
printf("\n");
return 0;
}
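/* Show one link. bpf_link_get_info_by_fd() fills variable length fields
* (tracepoint/function/file names, kprobe_multi addresses) only when the
* caller provides a buffer, so the info is queried a second time via the
* "again" label once the relevant pointer and length have been set up.
*/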
static int do_show_link(int fd)
{
struct bpf_link_info info;
__u32 len = sizeof(info);
__u64 *addrs = NULL;
char buf[PATH_MAX];
int count;
int err;
memset(&info, 0, sizeof(info));
buf[0] = '\0';
again:
err = bpf_link_get_info_by_fd(fd, &info, &len);
if (err) {
p_err("can't get link info: %s",
strerror(errno));
close(fd);
return err;
}
if (info.type == BPF_LINK_TYPE_RAW_TRACEPOINT &&
!info.raw_tracepoint.tp_name) {
info.raw_tracepoint.tp_name = ptr_to_u64(&buf);
info.raw_tracepoint.tp_name_len = sizeof(buf);
goto again;
}
if (info.type == BPF_LINK_TYPE_ITER &&
!info.iter.target_name) {
info.iter.target_name = ptr_to_u64(&buf);
info.iter.target_name_len = sizeof(buf);
goto again;
}
if (info.type == BPF_LINK_TYPE_KPROBE_MULTI &&
!info.kprobe_multi.addrs) {
count = info.kprobe_multi.count;
if (count) {
addrs = calloc(count, sizeof(__u64));
if (!addrs) {
p_err("mem alloc failed");
close(fd);
return -ENOMEM;
}
info.kprobe_multi.addrs = ptr_to_u64(addrs);
goto again;
}
}
if (info.type == BPF_LINK_TYPE_PERF_EVENT) {
switch (info.perf_event.type) {
case BPF_PERF_EVENT_TRACEPOINT:
if (!info.perf_event.tracepoint.tp_name) {
info.perf_event.tracepoint.tp_name = ptr_to_u64(&buf);
info.perf_event.tracepoint.name_len = sizeof(buf);
goto again;
}
break;
case BPF_PERF_EVENT_KPROBE:
case BPF_PERF_EVENT_KRETPROBE:
if (!info.perf_event.kprobe.func_name) {
info.perf_event.kprobe.func_name = ptr_to_u64(&buf);
info.perf_event.kprobe.name_len = sizeof(buf);
goto again;
}
break;
case BPF_PERF_EVENT_UPROBE:
case BPF_PERF_EVENT_URETPROBE:
if (!info.perf_event.uprobe.file_name) {
info.perf_event.uprobe.file_name = ptr_to_u64(&buf);
info.perf_event.uprobe.name_len = sizeof(buf);
goto again;
}
break;
default:
break;
}
}
if (json_output)
show_link_close_json(fd, &info);
else
show_link_close_plain(fd, &info);
if (addrs)
free(addrs);
close(fd);
return 0;
}
static int do_show(int argc, char **argv)
{
__u32 id = 0;
int err, fd;
if (show_pinned) {
link_table = hashmap__new(hash_fn_for_key_as_id,
equal_fn_for_key_as_id, NULL);
if (IS_ERR(link_table)) {
p_err("failed to create hashmap for pinned paths");
return -1;
}
build_pinned_obj_table(link_table, BPF_OBJ_LINK);
}
build_obj_refs_table(&refs_table, BPF_OBJ_LINK);
if (argc == 2) {
fd = link_parse_fd(&argc, &argv);
if (fd < 0)
return fd;
do_show_link(fd);
goto out;
}
if (argc)
return BAD_ARG();
if (json_output)
jsonw_start_array(json_wtr);
while (true) {
err = bpf_link_get_next_id(id, &id);
if (err) {
if (errno == ENOENT)
break;
p_err("can't get next link: %s%s", strerror(errno),
errno == EINVAL ? " -- kernel too old?" : "");
break;
}
fd = bpf_link_get_fd_by_id(id);
if (fd < 0) {
if (errno == ENOENT)
continue;
p_err("can't get link by id (%u): %s",
id, strerror(errno));
break;
}
err = do_show_link(fd);
if (err)
break;
}
if (json_output)
jsonw_end_array(json_wtr);
delete_obj_refs_table(refs_table);
if (show_pinned)
delete_pinned_obj_table(link_table);
out:
if (dd.sym_count)
kernel_syms_destroy(&dd);
return errno == ENOENT ? 0 : -1;
}
static int do_pin(int argc, char **argv)
{
int err;
err = do_pin_any(argc, argv, link_parse_fd);
if (!err && json_output)
jsonw_null(json_wtr);
return err;
}
static int do_detach(int argc, char **argv)
{
int err, fd;
if (argc != 2) {
p_err("link specifier is invalid or missing\n");
return 1;
}
fd = link_parse_fd(&argc, &argv);
if (fd < 0)
return 1;
err = bpf_link_detach(fd);
if (err)
err = -errno;
close(fd);
if (err) {
p_err("failed link detach: %s", strerror(-err));
return 1;
}
if (json_output)
jsonw_null(json_wtr);
return 0;
}
static int do_help(int argc, char **argv)
{
if (json_output) {
jsonw_null(json_wtr);
return 0;
}
fprintf(stderr,
"Usage: %1$s %2$s { show | list } [LINK]\n"
" %1$s %2$s pin LINK FILE\n"
" %1$s %2$s detach LINK\n"
" %1$s %2$s help\n"
"\n"
" " HELP_SPEC_LINK "\n"
" " HELP_SPEC_OPTIONS " |\n"
" {-f|--bpffs} | {-n|--nomount} }\n"
"",
bin_name, argv[-2]);
return 0;
}
static const struct cmd cmds[] = {
{ "show", do_show },
{ "list", do_show },
{ "help", do_help },
{ "pin", do_pin },
{ "detach", do_detach },
{ 0 }
};
int do_link(int argc, char **argv)
{
return cmd_select(cmds, argc, argv, do_help);
}
| linux-master | tools/bpf/bpftool/link.c |
// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
/* Copyright (C) 2018 Netronome Systems, Inc. */
#include <linux/list.h>
#include <stdlib.h>
#include <string.h>
#include "cfg.h"
#include "main.h"
#include "xlated_dumper.h"
struct cfg {
struct list_head funcs;
int func_num;
};
struct func_node {
struct list_head l;
struct list_head bbs;
struct bpf_insn *start;
struct bpf_insn *end;
int idx;
int bb_num;
};
struct bb_node {
struct list_head l;
struct list_head e_prevs;
struct list_head e_succs;
struct bpf_insn *head;
struct bpf_insn *tail;
int idx;
};
#define EDGE_FLAG_EMPTY 0x0
#define EDGE_FLAG_FALLTHROUGH 0x1
#define EDGE_FLAG_JUMP 0x2
struct edge_node {
struct list_head l;
struct bb_node *src;
struct bb_node *dst;
int flags;
};
#define ENTRY_BLOCK_INDEX 0
#define EXIT_BLOCK_INDEX 1
#define NUM_FIXED_BLOCKS 2
#define func_prev(func) list_prev_entry(func, l)
#define func_next(func) list_next_entry(func, l)
#define bb_prev(bb) list_prev_entry(bb, l)
#define bb_next(bb) list_next_entry(bb, l)
#define entry_bb(func) func_first_bb(func)
#define exit_bb(func) func_last_bb(func)
#define cfg_first_func(cfg) \
list_first_entry(&cfg->funcs, struct func_node, l)
#define cfg_last_func(cfg) \
list_last_entry(&cfg->funcs, struct func_node, l)
#define func_first_bb(func) \
list_first_entry(&func->bbs, struct bb_node, l)
#define func_last_bb(func) \
list_last_entry(&func->bbs, struct bb_node, l)
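/* Insert a FUNC node for the function starting at @insn, keeping the list
* sorted by start address. Returns the existing node if one is already
* present for this address, NULL on allocation failure.
*/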
static struct func_node *cfg_append_func(struct cfg *cfg, struct bpf_insn *insn)
{
struct func_node *new_func, *func;
list_for_each_entry(func, &cfg->funcs, l) {
if (func->start == insn)
return func;
else if (func->start > insn)
break;
}
func = func_prev(func);
new_func = calloc(1, sizeof(*new_func));
if (!new_func) {
p_err("OOM when allocating FUNC node");
return NULL;
}
new_func->start = insn;
new_func->idx = cfg->func_num;
list_add(&new_func->l, &func->l);
cfg->func_num++;
return new_func;
}
static struct bb_node *func_append_bb(struct func_node *func,
struct bpf_insn *insn)
{
struct bb_node *new_bb, *bb;
list_for_each_entry(bb, &func->bbs, l) {
if (bb->head == insn)
return bb;
else if (bb->head > insn)
break;
}
bb = bb_prev(bb);
new_bb = calloc(1, sizeof(*new_bb));
if (!new_bb) {
p_err("OOM when allocating BB node");
return NULL;
}
new_bb->head = insn;
INIT_LIST_HEAD(&new_bb->e_prevs);
INIT_LIST_HEAD(&new_bb->e_succs);
list_add(&new_bb->l, &bb->l);
return new_bb;
}
static struct bb_node *func_insert_dummy_bb(struct list_head *after)
{
struct bb_node *bb;
bb = calloc(1, sizeof(*bb));
if (!bb) {
p_err("OOM when allocating BB node");
return NULL;
}
INIT_LIST_HEAD(&bb->e_prevs);
INIT_LIST_HEAD(&bb->e_succs);
list_add(&bb->l, after);
return bb;
}
static bool cfg_partition_funcs(struct cfg *cfg, struct bpf_insn *cur,
struct bpf_insn *end)
{
struct func_node *func, *last_func;
func = cfg_append_func(cfg, cur);
if (!func)
return true;
for (; cur < end; cur++) {
if (cur->code != (BPF_JMP | BPF_CALL))
continue;
if (cur->src_reg != BPF_PSEUDO_CALL)
continue;
func = cfg_append_func(cfg, cur + cur->off + 1);
if (!func)
return true;
}
last_func = cfg_last_func(cfg);
last_func->end = end - 1;
func = cfg_first_func(cfg);
list_for_each_entry_from(func, &last_func->l, l) {
func->end = func_next(func)->start - 1;
}
return false;
}
static bool is_jmp_insn(__u8 code)
{
return BPF_CLASS(code) == BPF_JMP || BPF_CLASS(code) == BPF_JMP32;
}
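/* First pass of basic block partitioning: record a block head at the
* function entry, at every jump target and, for conditional jumps, at the
* fall-through instruction. BPF_EXIT and BPF_CALL do not end a block here.
* Returns true on allocation failure.
*/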
static bool func_partition_bb_head(struct func_node *func)
{
struct bpf_insn *cur, *end;
struct bb_node *bb;
cur = func->start;
end = func->end;
INIT_LIST_HEAD(&func->bbs);
bb = func_append_bb(func, cur);
if (!bb)
return true;
for (; cur <= end; cur++) {
if (is_jmp_insn(cur->code)) {
__u8 opcode = BPF_OP(cur->code);
if (opcode == BPF_EXIT || opcode == BPF_CALL)
continue;
bb = func_append_bb(func, cur + cur->off + 1);
if (!bb)
return true;
if (opcode != BPF_JA) {
bb = func_append_bb(func, cur + 1);
if (!bb)
return true;
}
}
}
return false;
}
static void func_partition_bb_tail(struct func_node *func)
{
unsigned int bb_idx = NUM_FIXED_BLOCKS;
struct bb_node *bb, *last;
last = func_last_bb(func);
last->tail = func->end;
bb = func_first_bb(func);
list_for_each_entry_from(bb, &last->l, l) {
bb->tail = bb_next(bb)->head - 1;
bb->idx = bb_idx++;
}
last->idx = bb_idx++;
func->bb_num = bb_idx;
}
static bool func_add_special_bb(struct func_node *func)
{
struct bb_node *bb;
bb = func_insert_dummy_bb(&func->bbs);
if (!bb)
return true;
bb->idx = ENTRY_BLOCK_INDEX;
bb = func_insert_dummy_bb(&func_last_bb(func)->l);
if (!bb)
return true;
bb->idx = EXIT_BLOCK_INDEX;
return false;
}
static bool func_partition_bb(struct func_node *func)
{
if (func_partition_bb_head(func))
return true;
func_partition_bb_tail(func);
return false;
}
static struct bb_node *func_search_bb_with_head(struct func_node *func,
struct bpf_insn *insn)
{
struct bb_node *bb;
list_for_each_entry(bb, &func->bbs, l) {
if (bb->head == insn)
return bb;
}
return NULL;
}
static struct edge_node *new_edge(struct bb_node *src, struct bb_node *dst,
int flags)
{
struct edge_node *e;
e = calloc(1, sizeof(*e));
if (!e) {
p_err("OOM when allocating edge node");
return NULL;
}
if (src)
e->src = src;
if (dst)
e->dst = dst;
e->flags |= flags;
return e;
}
static bool func_add_bb_edges(struct func_node *func)
{
struct bpf_insn *insn;
struct edge_node *e;
struct bb_node *bb;
bb = entry_bb(func);
e = new_edge(bb, bb_next(bb), EDGE_FLAG_FALLTHROUGH);
if (!e)
return true;
list_add_tail(&e->l, &bb->e_succs);
bb = exit_bb(func);
e = new_edge(bb_prev(bb), bb, EDGE_FLAG_FALLTHROUGH);
if (!e)
return true;
list_add_tail(&e->l, &bb->e_prevs);
bb = entry_bb(func);
bb = bb_next(bb);
list_for_each_entry_from(bb, &exit_bb(func)->l, l) {
e = new_edge(bb, NULL, EDGE_FLAG_EMPTY);
if (!e)
return true;
e->src = bb;
insn = bb->tail;
if (!is_jmp_insn(insn->code) ||
BPF_OP(insn->code) == BPF_EXIT) {
e->dst = bb_next(bb);
e->flags |= EDGE_FLAG_FALLTHROUGH;
list_add_tail(&e->l, &bb->e_succs);
continue;
} else if (BPF_OP(insn->code) == BPF_JA) {
e->dst = func_search_bb_with_head(func,
insn + insn->off + 1);
e->flags |= EDGE_FLAG_JUMP;
list_add_tail(&e->l, &bb->e_succs);
continue;
}
e->dst = bb_next(bb);
e->flags |= EDGE_FLAG_FALLTHROUGH;
list_add_tail(&e->l, &bb->e_succs);
e = new_edge(bb, NULL, EDGE_FLAG_JUMP);
if (!e)
return true;
e->src = bb;
e->dst = func_search_bb_with_head(func, insn + insn->off + 1);
list_add_tail(&e->l, &bb->e_succs);
}
return false;
}
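/* Build the control flow graph for the @len bytes of instructions at @insn:
* split the program into functions at pseudo-call boundaries, partition
* each function into basic blocks, add the dummy ENTRY/EXIT blocks and
* connect the blocks with fall-through and jump edges. Returns true on
* error.
*/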
static bool cfg_build(struct cfg *cfg, struct bpf_insn *insn, unsigned int len)
{
int cnt = len / sizeof(*insn);
struct func_node *func;
INIT_LIST_HEAD(&cfg->funcs);
if (cfg_partition_funcs(cfg, insn, insn + cnt))
return true;
list_for_each_entry(func, &cfg->funcs, l) {
if (func_partition_bb(func) || func_add_special_bb(func))
return true;
if (func_add_bb_edges(func))
return true;
}
return false;
}
static void cfg_destroy(struct cfg *cfg)
{
struct func_node *func, *func2;
list_for_each_entry_safe(func, func2, &cfg->funcs, l) {
struct bb_node *bb, *bb2;
list_for_each_entry_safe(bb, bb2, &func->bbs, l) {
struct edge_node *e, *e2;
list_for_each_entry_safe(e, e2, &bb->e_prevs, l) {
list_del(&e->l);
free(e);
}
list_for_each_entry_safe(e, e2, &bb->e_succs, l) {
list_del(&e->l);
free(e);
}
list_del(&bb->l);
free(bb);
}
list_del(&func->l);
free(func);
}
}
static void
draw_bb_node(struct func_node *func, struct bb_node *bb, struct dump_data *dd,
bool opcodes, bool linum)
{
const char *shape;
if (bb->idx == ENTRY_BLOCK_INDEX || bb->idx == EXIT_BLOCK_INDEX)
shape = "Mdiamond";
else
shape = "record";
printf("\tfn_%d_bb_%d [shape=%s,style=filled,label=\"",
func->idx, bb->idx, shape);
if (bb->idx == ENTRY_BLOCK_INDEX) {
printf("ENTRY");
} else if (bb->idx == EXIT_BLOCK_INDEX) {
printf("EXIT");
} else {
unsigned int start_idx;
printf("{\\\n");
start_idx = bb->head - func->start;
dump_xlated_for_graph(dd, bb->head, bb->tail, start_idx,
opcodes, linum);
printf("}");
}
printf("\"];\n\n");
}
static void draw_bb_succ_edges(struct func_node *func, struct bb_node *bb)
{
const char *style = "\"solid,bold\"";
const char *color = "black";
int func_idx = func->idx;
struct edge_node *e;
int weight = 10;
if (list_empty(&bb->e_succs))
return;
list_for_each_entry(e, &bb->e_succs, l) {
printf("\tfn_%d_bb_%d:s -> fn_%d_bb_%d:n [style=%s, color=%s, weight=%d, constraint=true",
func_idx, e->src->idx, func_idx, e->dst->idx,
style, color, weight);
printf("];\n");
}
}
static void
func_output_bb_def(struct func_node *func, struct dump_data *dd,
bool opcodes, bool linum)
{
struct bb_node *bb;
list_for_each_entry(bb, &func->bbs, l) {
draw_bb_node(func, bb, dd, opcodes, linum);
}
}
static void func_output_edges(struct func_node *func)
{
int func_idx = func->idx;
struct bb_node *bb;
list_for_each_entry(bb, &func->bbs, l) {
draw_bb_succ_edges(func, bb);
}
/* Add an invisible edge from ENTRY to EXIT, this is to
* improve the graph layout.
*/
printf("\tfn_%d_bb_%d:s -> fn_%d_bb_%d:n [style=\"invis\", constraint=true];\n",
func_idx, ENTRY_BLOCK_INDEX, func_idx, EXIT_BLOCK_INDEX);
}
static void
cfg_dump(struct cfg *cfg, struct dump_data *dd, bool opcodes, bool linum)
{
struct func_node *func;
printf("digraph \"DOT graph for eBPF program\" {\n");
list_for_each_entry(func, &cfg->funcs, l) {
printf("subgraph \"cluster_%d\" {\n\tstyle=\"dashed\";\n\tcolor=\"black\";\n\tlabel=\"func_%d ()\";\n",
func->idx, func->idx);
func_output_bb_def(func, dd, opcodes, linum);
func_output_edges(func);
printf("}\n");
}
printf("}\n");
}
void dump_xlated_cfg(struct dump_data *dd, void *buf, unsigned int len,
bool opcodes, bool linum)
{
struct bpf_insn *insn = buf;
struct cfg cfg;
memset(&cfg, 0, sizeof(cfg));
if (cfg_build(&cfg, insn, len))
return;
cfg_dump(&cfg, dd, opcodes, linum);
cfg_destroy(&cfg);
}
| linux-master | tools/bpf/bpftool/cfg.c |
// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
// Copyright (C) 2018 Facebook
// Author: Yonghong Song <[email protected]>
#ifndef _GNU_SOURCE
#define _GNU_SOURCE
#endif
#include <ctype.h>
#include <errno.h>
#include <fcntl.h>
#include <stdlib.h>
#include <string.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <unistd.h>
#include <dirent.h>
#include <bpf/bpf.h>
#include "main.h"
/* 0: undecided, 1: supported, 2: not supported */
static int perf_query_supported;
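/* Probe once whether the kernel supports BPF_TASK_FD_QUERY. The query is
* issued against an fd with no BPF attachment, so a supporting kernel is
* expected to fail with ENOTSUPP; any other errno means the feature is
* unavailable (or the user lacks permissions).
*/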
static bool has_perf_query_support(void)
{
__u64 probe_offset, probe_addr;
__u32 len, prog_id, fd_type;
char buf[256];
int fd;
if (perf_query_supported)
goto out;
fd = open("/", O_RDONLY);
if (fd < 0) {
p_err("perf_query_support: cannot open directory \"/\" (%s)",
strerror(errno));
goto out;
}
/* the following query will fail as no bpf attachment,
* the expected errno is ENOTSUPP
*/
errno = 0;
len = sizeof(buf);
bpf_task_fd_query(getpid(), fd, 0, buf, &len, &prog_id,
&fd_type, &probe_offset, &probe_addr);
if (errno == 524 /* ENOTSUPP */) {
perf_query_supported = 1;
goto close_fd;
}
perf_query_supported = 2;
p_err("perf_query_support: %s", strerror(errno));
fprintf(stderr,
"HINT: non root or kernel doesn't support TASK_FD_QUERY\n");
close_fd:
close(fd);
out:
return perf_query_supported == 1;
}
static void print_perf_json(int pid, int fd, __u32 prog_id, __u32 fd_type,
char *buf, __u64 probe_offset, __u64 probe_addr)
{
jsonw_start_object(json_wtr);
jsonw_int_field(json_wtr, "pid", pid);
jsonw_int_field(json_wtr, "fd", fd);
jsonw_uint_field(json_wtr, "prog_id", prog_id);
switch (fd_type) {
case BPF_FD_TYPE_RAW_TRACEPOINT:
jsonw_string_field(json_wtr, "fd_type", "raw_tracepoint");
jsonw_string_field(json_wtr, "tracepoint", buf);
break;
case BPF_FD_TYPE_TRACEPOINT:
jsonw_string_field(json_wtr, "fd_type", "tracepoint");
jsonw_string_field(json_wtr, "tracepoint", buf);
break;
case BPF_FD_TYPE_KPROBE:
jsonw_string_field(json_wtr, "fd_type", "kprobe");
if (buf[0] != '\0') {
jsonw_string_field(json_wtr, "func", buf);
jsonw_lluint_field(json_wtr, "offset", probe_offset);
} else {
jsonw_lluint_field(json_wtr, "addr", probe_addr);
}
break;
case BPF_FD_TYPE_KRETPROBE:
jsonw_string_field(json_wtr, "fd_type", "kretprobe");
if (buf[0] != '\0') {
jsonw_string_field(json_wtr, "func", buf);
jsonw_lluint_field(json_wtr, "offset", probe_offset);
} else {
jsonw_lluint_field(json_wtr, "addr", probe_addr);
}
break;
case BPF_FD_TYPE_UPROBE:
jsonw_string_field(json_wtr, "fd_type", "uprobe");
jsonw_string_field(json_wtr, "filename", buf);
jsonw_lluint_field(json_wtr, "offset", probe_offset);
break;
case BPF_FD_TYPE_URETPROBE:
jsonw_string_field(json_wtr, "fd_type", "uretprobe");
jsonw_string_field(json_wtr, "filename", buf);
jsonw_lluint_field(json_wtr, "offset", probe_offset);
break;
default:
break;
}
jsonw_end_object(json_wtr);
}
static void print_perf_plain(int pid, int fd, __u32 prog_id, __u32 fd_type,
char *buf, __u64 probe_offset, __u64 probe_addr)
{
printf("pid %d fd %d: prog_id %u ", pid, fd, prog_id);
switch (fd_type) {
case BPF_FD_TYPE_RAW_TRACEPOINT:
printf("raw_tracepoint %s\n", buf);
break;
case BPF_FD_TYPE_TRACEPOINT:
printf("tracepoint %s\n", buf);
break;
case BPF_FD_TYPE_KPROBE:
if (buf[0] != '\0')
printf("kprobe func %s offset %llu\n", buf,
probe_offset);
else
printf("kprobe addr %llu\n", probe_addr);
break;
case BPF_FD_TYPE_KRETPROBE:
if (buf[0] != '\0')
printf("kretprobe func %s offset %llu\n", buf,
probe_offset);
else
printf("kretprobe addr %llu\n", probe_addr);
break;
case BPF_FD_TYPE_UPROBE:
printf("uprobe filename %s offset %llu\n", buf, probe_offset);
break;
case BPF_FD_TYPE_URETPROBE:
printf("uretprobe filename %s offset %llu\n", buf,
probe_offset);
break;
default:
break;
}
}
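/* Scan /proc/<pid>/fd for every process and query each fd with
* bpf_task_fd_query() to discover perf events with BPF programs attached.
*/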
static int show_proc(void)
{
struct dirent *proc_de, *pid_fd_de;
__u64 probe_offset, probe_addr;
__u32 len, prog_id, fd_type;
DIR *proc, *pid_fd;
int err, pid, fd;
const char *pch;
char buf[4096];
proc = opendir("/proc");
if (!proc)
return -1;
while ((proc_de = readdir(proc))) {
pid = 0;
pch = proc_de->d_name;
/* pid should be all numbers */
while (isdigit(*pch)) {
pid = pid * 10 + *pch - '0';
pch++;
}
if (*pch != '\0')
continue;
err = snprintf(buf, sizeof(buf), "/proc/%s/fd", proc_de->d_name);
if (err < 0 || err >= (int)sizeof(buf))
continue;
pid_fd = opendir(buf);
if (!pid_fd)
continue;
while ((pid_fd_de = readdir(pid_fd))) {
fd = 0;
pch = pid_fd_de->d_name;
/* fd should be all numbers */
while (isdigit(*pch)) {
fd = fd * 10 + *pch - '0';
pch++;
}
if (*pch != '\0')
continue;
/* query (pid, fd) for potential perf events */
len = sizeof(buf);
err = bpf_task_fd_query(pid, fd, 0, buf, &len,
&prog_id, &fd_type,
&probe_offset, &probe_addr);
if (err < 0)
continue;
if (json_output)
print_perf_json(pid, fd, prog_id, fd_type, buf,
probe_offset, probe_addr);
else
print_perf_plain(pid, fd, prog_id, fd_type, buf,
probe_offset, probe_addr);
}
closedir(pid_fd);
}
closedir(proc);
return 0;
}
static int do_show(int argc, char **argv)
{
int err;
if (!has_perf_query_support())
return -1;
if (json_output)
jsonw_start_array(json_wtr);
err = show_proc();
if (json_output)
jsonw_end_array(json_wtr);
return err;
}
static int do_help(int argc, char **argv)
{
fprintf(stderr,
"Usage: %1$s %2$s { show | list }\n"
" %1$s %2$s help\n"
"\n"
" " HELP_SPEC_OPTIONS " }\n"
"",
bin_name, argv[-2]);
return 0;
}
static const struct cmd cmds[] = {
{ "show", do_show },
{ "list", do_show },
{ "help", do_help },
{ 0 }
};
int do_perf(int argc, char **argv)
{
return cmd_select(cmds, argc, argv, do_help);
}
| linux-master | tools/bpf/bpftool/perf.c |
// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
/*
* Based on:
*
* Minimal BPF JIT image disassembler
*
* Disassembles BPF JIT compiler emitted opcodes back to asm insn's for
* debugging or verification purposes.
*
* Copyright 2013 Daniel Borkmann <[email protected]>
* Licensed under the GNU General Public License, version 2.0 (GPLv2)
*/
#ifndef _GNU_SOURCE
#define _GNU_SOURCE
#endif
#include <stdio.h>
#include <stdarg.h>
#include <stdint.h>
#include <stdlib.h>
#include <unistd.h>
#include <string.h>
#include <sys/stat.h>
#include <limits.h>
#include <bpf/libbpf.h>
#ifdef HAVE_LLVM_SUPPORT
#include <llvm-c/Core.h>
#include <llvm-c/Disassembler.h>
#include <llvm-c/Target.h>
#include <llvm-c/TargetMachine.h>
#endif
#ifdef HAVE_LIBBFD_SUPPORT
#include <bfd.h>
#include <dis-asm.h>
#include <tools/dis-asm-compat.h>
#endif
#include "json_writer.h"
#include "main.h"
static int oper_count;
#ifdef HAVE_LLVM_SUPPORT
#define DISASM_SPACER
typedef LLVMDisasmContextRef disasm_ctx_t;
static int printf_json(char *s)
{
s = strtok(s, " \t");
jsonw_string_field(json_wtr, "operation", s);
jsonw_name(json_wtr, "operands");
jsonw_start_array(json_wtr);
oper_count = 1;
while ((s = strtok(NULL, " \t,()")) != 0) {
jsonw_string(json_wtr, s);
oper_count++;
}
return 0;
}
/* This callback to set the ref_type is necessary to have the LLVM disassembler
* print PC-relative addresses instead of byte offsets for branch instruction
* targets.
*/
static const char *
symbol_lookup_callback(__maybe_unused void *disasm_info,
__maybe_unused uint64_t ref_value,
uint64_t *ref_type, __maybe_unused uint64_t ref_PC,
__maybe_unused const char **ref_name)
{
*ref_type = LLVMDisassembler_ReferenceType_InOut_None;
return NULL;
}
static int
init_context(disasm_ctx_t *ctx, const char *arch,
__maybe_unused const char *disassembler_options,
__maybe_unused unsigned char *image, __maybe_unused ssize_t len)
{
char *triple;
if (arch)
triple = LLVMNormalizeTargetTriple(arch);
else
triple = LLVMGetDefaultTargetTriple();
if (!triple) {
p_err("Failed to retrieve triple");
return -1;
}
*ctx = LLVMCreateDisasm(triple, NULL, 0, NULL, symbol_lookup_callback);
LLVMDisposeMessage(triple);
if (!*ctx) {
p_err("Failed to create disassembler");
return -1;
}
return 0;
}
static void destroy_context(disasm_ctx_t *ctx)
{
LLVMDisasmDispose(*ctx);
}
static int
disassemble_insn(disasm_ctx_t *ctx, unsigned char *image, ssize_t len, int pc)
{
char buf[256];
int count;
count = LLVMDisasmInstruction(*ctx, image + pc, len - pc, pc,
buf, sizeof(buf));
if (json_output)
printf_json(buf);
else
printf("%s", buf);
return count;
}
int disasm_init(void)
{
LLVMInitializeAllTargetInfos();
LLVMInitializeAllTargetMCs();
LLVMInitializeAllDisassemblers();
return 0;
}
#endif /* HAVE_LLVM_SUPPORT */
#ifdef HAVE_LIBBFD_SUPPORT
#define DISASM_SPACER "\t"
typedef struct {
struct disassemble_info *info;
disassembler_ftype disassemble;
bfd *bfdf;
} disasm_ctx_t;
static int get_exec_path(char *tpath, size_t size)
{
const char *path = "/proc/self/exe";
ssize_t len;
len = readlink(path, tpath, size - 1);
if (len <= 0)
return -1;
tpath[len] = 0;
return 0;
}
static int printf_json(void *out, const char *fmt, va_list ap)
{
char *s;
int err;
err = vasprintf(&s, fmt, ap);
if (err < 0)
return -1;
if (!oper_count) {
int i;
/* Strip trailing spaces */
i = strlen(s) - 1;
while (i >= 0 && s[i] == ' ')
s[i--] = '\0';
jsonw_string_field(json_wtr, "operation", s);
jsonw_name(json_wtr, "operands");
jsonw_start_array(json_wtr);
oper_count++;
} else if (!strcmp(fmt, ",")) {
/* Skip */
} else {
jsonw_string(json_wtr, s);
oper_count++;
}
free(s);
return 0;
}
static int fprintf_json(void *out, const char *fmt, ...)
{
va_list ap;
int r;
va_start(ap, fmt);
r = printf_json(out, fmt, ap);
va_end(ap);
return r;
}
static int fprintf_json_styled(void *out,
enum disassembler_style style __maybe_unused,
const char *fmt, ...)
{
va_list ap;
int r;
va_start(ap, fmt);
r = printf_json(out, fmt, ap);
va_end(ap);
return r;
}
static int init_context(disasm_ctx_t *ctx, const char *arch,
const char *disassembler_options,
unsigned char *image, ssize_t len)
{
struct disassemble_info *info;
char tpath[PATH_MAX];
bfd *bfdf;
memset(tpath, 0, sizeof(tpath));
if (get_exec_path(tpath, sizeof(tpath))) {
p_err("failed to create disassembler (get_exec_path)");
return -1;
}
ctx->bfdf = bfd_openr(tpath, NULL);
if (!ctx->bfdf) {
p_err("failed to create disassembler (bfd_openr)");
return -1;
}
if (!bfd_check_format(ctx->bfdf, bfd_object)) {
p_err("failed to create disassembler (bfd_check_format)");
goto err_close;
}
bfdf = ctx->bfdf;
ctx->info = malloc(sizeof(struct disassemble_info));
if (!ctx->info) {
p_err("mem alloc failed");
goto err_close;
}
info = ctx->info;
if (json_output)
init_disassemble_info_compat(info, stdout,
(fprintf_ftype) fprintf_json,
fprintf_json_styled);
else
init_disassemble_info_compat(info, stdout,
(fprintf_ftype) fprintf,
fprintf_styled);
/* Update architecture info for offload. */
if (arch) {
const bfd_arch_info_type *inf = bfd_scan_arch(arch);
if (inf) {
bfdf->arch_info = inf;
} else {
p_err("No libbfd support for %s", arch);
goto err_free;
}
}
info->arch = bfd_get_arch(bfdf);
info->mach = bfd_get_mach(bfdf);
if (disassembler_options)
info->disassembler_options = disassembler_options;
info->buffer = image;
info->buffer_length = len;
disassemble_init_for_target(info);
#ifdef DISASM_FOUR_ARGS_SIGNATURE
ctx->disassemble = disassembler(info->arch,
bfd_big_endian(bfdf),
info->mach,
bfdf);
#else
ctx->disassemble = disassembler(bfdf);
#endif
if (!ctx->disassemble) {
p_err("failed to create disassembler");
goto err_free;
}
return 0;
err_free:
free(info);
err_close:
bfd_close(ctx->bfdf);
return -1;
}
static void destroy_context(disasm_ctx_t *ctx)
{
free(ctx->info);
bfd_close(ctx->bfdf);
}
static int
disassemble_insn(disasm_ctx_t *ctx, __maybe_unused unsigned char *image,
__maybe_unused ssize_t len, int pc)
{
return ctx->disassemble(pc, ctx->info);
}
int disasm_init(void)
{
bfd_init();
return 0;
}
#endif /* HAVE_LIBBFD_SUPPORT */
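/* Disassemble @len bytes of JITed code at @image, printing one line (or one
* JSON object) per instruction, optionally with raw opcodes and BTF line
* info. @arch and @disassembler_options select a non-native target, e.g.
* for programs offloaded to a NIC.
*/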
int disasm_print_insn(unsigned char *image, ssize_t len, int opcodes,
const char *arch, const char *disassembler_options,
const struct btf *btf,
const struct bpf_prog_linfo *prog_linfo,
__u64 func_ksym, unsigned int func_idx,
bool linum)
{
const struct bpf_line_info *linfo = NULL;
unsigned int nr_skip = 0;
int count, i, pc = 0;
disasm_ctx_t ctx;
if (!len)
return -1;
if (init_context(&ctx, arch, disassembler_options, image, len))
return -1;
if (json_output)
jsonw_start_array(json_wtr);
do {
if (prog_linfo) {
linfo = bpf_prog_linfo__lfind_addr_func(prog_linfo,
func_ksym + pc,
func_idx,
nr_skip);
if (linfo)
nr_skip++;
}
if (json_output) {
jsonw_start_object(json_wtr);
oper_count = 0;
if (linfo)
btf_dump_linfo_json(btf, linfo, linum);
jsonw_name(json_wtr, "pc");
jsonw_printf(json_wtr, "\"0x%x\"", pc);
} else {
if (linfo)
btf_dump_linfo_plain(btf, linfo, "; ",
linum);
printf("%4x:" DISASM_SPACER, pc);
}
count = disassemble_insn(&ctx, image, len, pc);
if (json_output) {
/* Operand array, was started in fprintf_json. Before
* that, make sure we have a _null_ value if no operand
* other than operation code was present.
*/
if (oper_count == 1)
jsonw_null(json_wtr);
jsonw_end_array(json_wtr);
}
if (opcodes) {
if (json_output) {
jsonw_name(json_wtr, "opcodes");
jsonw_start_array(json_wtr);
for (i = 0; i < count; ++i)
jsonw_printf(json_wtr, "\"0x%02hhx\"",
(uint8_t)image[pc + i]);
jsonw_end_array(json_wtr);
} else {
printf("\n\t");
for (i = 0; i < count; ++i)
printf("%02x ",
(uint8_t)image[pc + i]);
}
}
if (json_output)
jsonw_end_object(json_wtr);
else
printf("\n");
pc += count;
} while (count > 0 && pc < len);
if (json_output)
jsonw_end_array(json_wtr);
destroy_context(&ctx);
return 0;
}
| linux-master | tools/bpf/bpftool/jit_disasm.c |
// SPDX-License-Identifier: (GPL-2.0-or-later OR BSD-2-Clause)
/*
* Simple streaming JSON writer
*
* This takes care of the annoying bits of JSON syntax like the commas
* after elements
*
* Authors: Stephen Hemminger <[email protected]>
*/
#include <stdio.h>
#include <stdbool.h>
#include <stdarg.h>
#include <assert.h>
#include <malloc.h>
#include <inttypes.h>
#include <stdint.h>
#include "json_writer.h"
struct json_writer {
FILE *out; /* output file */
unsigned depth; /* nesting */
bool pretty; /* optional whitepace */
char sep; /* either nul or comma */
};
/* indentation for pretty print */
static void jsonw_indent(json_writer_t *self)
{
unsigned i;
for (i = 0; i < self->depth; ++i)
fputs(" ", self->out);
}
/* end current line and indent if pretty printing */
static void jsonw_eol(json_writer_t *self)
{
if (!self->pretty)
return;
putc('\n', self->out);
jsonw_indent(self);
}
/* If current object is not empty print a comma */
static void jsonw_eor(json_writer_t *self)
{
if (self->sep != '\0')
putc(self->sep, self->out);
self->sep = ',';
}
/* Output JSON encoded string */
/* Handles C escapes, does not do Unicode */
static void jsonw_puts(json_writer_t *self, const char *str)
{
putc('"', self->out);
for (; *str; ++str)
switch (*str) {
case '\t':
fputs("\\t", self->out);
break;
case '\n':
fputs("\\n", self->out);
break;
case '\r':
fputs("\\r", self->out);
break;
case '\f':
fputs("\\f", self->out);
break;
case '\b':
fputs("\\b", self->out);
break;
case '\\':
fputs("\\\\", self->out);
break;
case '"':
fputs("\\\"", self->out);
break;
default:
putc(*str, self->out);
}
putc('"', self->out);
}
/* Create a new JSON stream */
json_writer_t *jsonw_new(FILE *f)
{
json_writer_t *self = malloc(sizeof(*self));
if (self) {
self->out = f;
self->depth = 0;
self->pretty = false;
self->sep = '\0';
}
return self;
}
/* End output to JSON stream */
void jsonw_destroy(json_writer_t **self_p)
{
json_writer_t *self = *self_p;
assert(self->depth == 0);
fputs("\n", self->out);
fflush(self->out);
free(self);
*self_p = NULL;
}
void jsonw_pretty(json_writer_t *self, bool on)
{
self->pretty = on;
}
void jsonw_reset(json_writer_t *self)
{
assert(self->depth == 0);
self->sep = '\0';
}
/* Basic blocks */
static void jsonw_begin(json_writer_t *self, int c)
{
jsonw_eor(self);
putc(c, self->out);
++self->depth;
self->sep = '\0';
}
static void jsonw_end(json_writer_t *self, int c)
{
assert(self->depth > 0);
--self->depth;
if (self->sep != '\0')
jsonw_eol(self);
putc(c, self->out);
self->sep = ',';
}
/* Add a JSON property name */
void jsonw_name(json_writer_t *self, const char *name)
{
jsonw_eor(self);
jsonw_eol(self);
self->sep = '\0';
jsonw_puts(self, name);
putc(':', self->out);
if (self->pretty)
putc(' ', self->out);
}
void jsonw_vprintf_enquote(json_writer_t *self, const char *fmt, va_list ap)
{
jsonw_eor(self);
putc('"', self->out);
vfprintf(self->out, fmt, ap);
putc('"', self->out);
}
void jsonw_printf(json_writer_t *self, const char *fmt, ...)
{
va_list ap;
va_start(ap, fmt);
jsonw_eor(self);
vfprintf(self->out, fmt, ap);
va_end(ap);
}
/* Collections */
void jsonw_start_object(json_writer_t *self)
{
jsonw_begin(self, '{');
}
void jsonw_end_object(json_writer_t *self)
{
jsonw_end(self, '}');
}
void jsonw_start_array(json_writer_t *self)
{
jsonw_begin(self, '[');
}
void jsonw_end_array(json_writer_t *self)
{
jsonw_end(self, ']');
}
/* JSON value types */
void jsonw_string(json_writer_t *self, const char *value)
{
jsonw_eor(self);
jsonw_puts(self, value);
}
void jsonw_bool(json_writer_t *self, bool val)
{
jsonw_printf(self, "%s", val ? "true" : "false");
}
void jsonw_null(json_writer_t *self)
{
jsonw_printf(self, "null");
}
void jsonw_float_fmt(json_writer_t *self, const char *fmt, double num)
{
jsonw_printf(self, fmt, num);
}
#ifdef notused
void jsonw_float(json_writer_t *self, double num)
{
jsonw_printf(self, "%g", num);
}
#endif
void jsonw_hu(json_writer_t *self, unsigned short num)
{
jsonw_printf(self, "%hu", num);
}
void jsonw_uint(json_writer_t *self, uint64_t num)
{
jsonw_printf(self, "%"PRIu64, num);
}
void jsonw_lluint(json_writer_t *self, unsigned long long int num)
{
jsonw_printf(self, "%llu", num);
}
void jsonw_int(json_writer_t *self, int64_t num)
{
jsonw_printf(self, "%"PRId64, num);
}
/* Basic name/value objects */
void jsonw_string_field(json_writer_t *self, const char *prop, const char *val)
{
jsonw_name(self, prop);
jsonw_string(self, val);
}
void jsonw_bool_field(json_writer_t *self, const char *prop, bool val)
{
jsonw_name(self, prop);
jsonw_bool(self, val);
}
#ifdef notused
void jsonw_float_field(json_writer_t *self, const char *prop, double val)
{
jsonw_name(self, prop);
jsonw_float(self, val);
}
#endif
void jsonw_float_field_fmt(json_writer_t *self,
const char *prop,
const char *fmt,
double val)
{
jsonw_name(self, prop);
jsonw_float_fmt(self, fmt, val);
}
void jsonw_uint_field(json_writer_t *self, const char *prop, uint64_t num)
{
jsonw_name(self, prop);
jsonw_uint(self, num);
}
void jsonw_hu_field(json_writer_t *self, const char *prop, unsigned short num)
{
jsonw_name(self, prop);
jsonw_hu(self, num);
}
void jsonw_lluint_field(json_writer_t *self,
const char *prop,
unsigned long long int num)
{
jsonw_name(self, prop);
jsonw_lluint(self, num);
}
void jsonw_int_field(json_writer_t *self, const char *prop, int64_t num)
{
jsonw_name(self, prop);
jsonw_int(self, num);
}
void jsonw_null_field(json_writer_t *self, const char *prop)
{
jsonw_name(self, prop);
jsonw_null(self);
}
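/* Minimal self-test below: building this file with -DTEST defined
 * (for instance "cc -DTEST -o jw_test json_writer.c", an assumed
 * invocation) yields a small demo program exercising the writer.
 */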
#ifdef TEST
int main(int argc, char **argv)
{
json_writer_t *wr = jsonw_new(stdout);
jsonw_start_object(wr);
jsonw_pretty(wr, true);
jsonw_name(wr, "Vyatta");
jsonw_start_object(wr);
jsonw_string_field(wr, "url", "http://vyatta.com");
jsonw_uint_field(wr, "downloads", 2000000ul);
jsonw_float_field(wr, "stock", 8.16);
jsonw_name(wr, "ARGV");
jsonw_start_array(wr);
while (--argc)
jsonw_string(wr, *++argv);
jsonw_end_array(wr);
jsonw_name(wr, "empty");
jsonw_start_array(wr);
jsonw_end_array(wr);
jsonw_name(wr, "NIL");
jsonw_start_object(wr);
jsonw_end_object(wr);
jsonw_null_field(wr, "my_null");
jsonw_name(wr, "special chars");
jsonw_start_array(wr);
jsonw_string_field(wr, "slash", "/");
jsonw_string_field(wr, "newline", "\n");
jsonw_string_field(wr, "tab", "\t");
jsonw_string_field(wr, "ff", "\f");
jsonw_string_field(wr, "quote", "\"");
jsonw_string_field(wr, "tick", "\'");
jsonw_string_field(wr, "backslash", "\\");
jsonw_end_array(wr);
jsonw_end_object(wr);
jsonw_end_object(wr);
jsonw_destroy(&wr);
return 0;
}
#endif
| linux-master | tools/bpf/bpftool/json_writer.c |
// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
/* Copyright (C) 2018 Netronome Systems, Inc. */
#include <errno.h>
#include <fcntl.h>
#include <bpf/libbpf.h>
#include <poll.h>
#include <signal.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>
#include <unistd.h>
#include <linux/bpf.h>
#include <linux/perf_event.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <sys/syscall.h>
#include <bpf/bpf.h>
#include "main.h"
#define MMAP_PAGE_CNT 16
static volatile bool stop;
struct perf_event_sample {
struct perf_event_header header;
__u64 time;
__u32 size;
unsigned char data[];
};
struct perf_event_lost {
struct perf_event_header header;
__u64 id;
__u64 lost;
};
static void int_exit(int signo)
{
fprintf(stderr, "Stopping...\n");
stop = true;
}
struct event_pipe_ctx {
bool all_cpus;
int cpu;
int idx;
};
static enum bpf_perf_event_ret
print_bpf_output(void *private_data, int cpu, struct perf_event_header *event)
{
struct perf_event_sample *e = container_of(event,
struct perf_event_sample,
header);
struct perf_event_lost *lost = container_of(event,
struct perf_event_lost,
header);
struct event_pipe_ctx *ctx = private_data;
int idx = ctx->all_cpus ? cpu : ctx->idx;
if (json_output) {
jsonw_start_object(json_wtr);
jsonw_name(json_wtr, "type");
jsonw_uint(json_wtr, e->header.type);
jsonw_name(json_wtr, "cpu");
jsonw_uint(json_wtr, cpu);
jsonw_name(json_wtr, "index");
jsonw_uint(json_wtr, idx);
if (e->header.type == PERF_RECORD_SAMPLE) {
jsonw_name(json_wtr, "timestamp");
jsonw_uint(json_wtr, e->time);
jsonw_name(json_wtr, "data");
print_data_json(e->data, e->size);
} else if (e->header.type == PERF_RECORD_LOST) {
jsonw_name(json_wtr, "lost");
jsonw_start_object(json_wtr);
jsonw_name(json_wtr, "id");
jsonw_uint(json_wtr, lost->id);
jsonw_name(json_wtr, "count");
jsonw_uint(json_wtr, lost->lost);
jsonw_end_object(json_wtr);
}
jsonw_end_object(json_wtr);
} else {
if (e->header.type == PERF_RECORD_SAMPLE) {
printf("== @%lld.%09lld CPU: %d index: %d =====\n",
e->time / 1000000000ULL, e->time % 1000000000ULL,
cpu, idx);
fprint_hex(stdout, e->data, e->size, " ");
printf("\n");
} else if (e->header.type == PERF_RECORD_LOST) {
printf("lost %lld events\n", lost->lost);
} else {
printf("unknown event type=%d size=%d\n",
e->header.type, e->header.size);
}
}
return LIBBPF_PERF_EVENT_CONT;
}
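/* Dump events from a BPF_MAP_TYPE_PERF_EVENT_ARRAY map, e.g.
 * (illustrative map id):
 *   bpftool map event_pipe id 37
 *   bpftool map event_pipe id 37 cpu 0 index 0
 * Without "cpu"/"index", events are consumed from all CPUs.
 */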
int do_event_pipe(int argc, char **argv)
{
struct perf_event_attr perf_attr = {
.sample_type = PERF_SAMPLE_RAW | PERF_SAMPLE_TIME,
.type = PERF_TYPE_SOFTWARE,
.config = PERF_COUNT_SW_BPF_OUTPUT,
.sample_period = 1,
.wakeup_events = 1,
};
struct bpf_map_info map_info = {};
LIBBPF_OPTS(perf_buffer_raw_opts, opts);
struct event_pipe_ctx ctx = {
.all_cpus = true,
.cpu = -1,
.idx = -1,
};
struct perf_buffer *pb;
__u32 map_info_len;
int err, map_fd;
map_info_len = sizeof(map_info);
map_fd = map_parse_fd_and_info(&argc, &argv, &map_info, &map_info_len);
if (map_fd < 0)
return -1;
if (map_info.type != BPF_MAP_TYPE_PERF_EVENT_ARRAY) {
p_err("map is not a perf event array");
goto err_close_map;
}
while (argc) {
if (argc < 2) {
BAD_ARG();
goto err_close_map;
}
if (is_prefix(*argv, "cpu")) {
char *endptr;
NEXT_ARG();
ctx.cpu = strtoul(*argv, &endptr, 0);
if (*endptr) {
p_err("can't parse %s as CPU ID", *argv);
goto err_close_map;
}
NEXT_ARG();
} else if (is_prefix(*argv, "index")) {
char *endptr;
NEXT_ARG();
ctx.idx = strtoul(*argv, &endptr, 0);
if (*endptr) {
p_err("can't parse %s as index", *argv);
goto err_close_map;
}
NEXT_ARG();
} else {
BAD_ARG();
goto err_close_map;
}
ctx.all_cpus = false;
}
if (!ctx.all_cpus) {
if (ctx.idx == -1 || ctx.cpu == -1) {
p_err("cpu and index must be specified together");
goto err_close_map;
}
} else {
ctx.cpu = 0;
ctx.idx = 0;
}
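	/* cpu_cnt == 0 asks libbpf to open a ring buffer on every
	 * online CPU; cpu_cnt == 1 consumes only the single requested
	 * cpu/index pair of the map.
	 */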
opts.cpu_cnt = ctx.all_cpus ? 0 : 1;
opts.cpus = &ctx.cpu;
opts.map_keys = &ctx.idx;
pb = perf_buffer__new_raw(map_fd, MMAP_PAGE_CNT, &perf_attr,
print_bpf_output, &ctx, &opts);
if (!pb) {
p_err("failed to create perf buffer: %s (%d)",
strerror(errno), errno);
goto err_close_map;
}
signal(SIGINT, int_exit);
signal(SIGHUP, int_exit);
signal(SIGTERM, int_exit);
if (json_output)
jsonw_start_array(json_wtr);
while (!stop) {
err = perf_buffer__poll(pb, 200);
if (err < 0 && err != -EINTR) {
p_err("perf buffer polling failed: %s (%d)",
strerror(errno), errno);
goto err_close_pb;
}
}
if (json_output)
jsonw_end_array(json_wtr);
perf_buffer__free(pb);
close(map_fd);
return 0;
err_close_pb:
perf_buffer__free(pb);
err_close_map:
close(map_fd);
return -1;
}
| linux-master | tools/bpf/bpftool/map_perf_ring.c |
// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
/* Copyright (C) 2020 Facebook */
#include <errno.h>
#include <linux/err.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <bpf/bpf.h>
#include <bpf/hashmap.h>
#include "main.h"
#include "skeleton/pid_iter.h"
#ifdef BPFTOOL_WITHOUT_SKELETONS
int build_obj_refs_table(struct hashmap **map, enum bpf_obj_type type)
{
return -ENOTSUP;
}
void delete_obj_refs_table(struct hashmap *map) {}
void emit_obj_refs_plain(struct hashmap *map, __u32 id, const char *prefix) {}
void emit_obj_refs_json(struct hashmap *map, __u32 id, json_writer_t *json_writer) {}
#else /* BPFTOOL_WITHOUT_SKELETONS */
#include "pid_iter.skel.h"
static void add_ref(struct hashmap *map, struct pid_iter_entry *e)
{
struct hashmap_entry *entry;
struct obj_refs *refs;
struct obj_ref *ref;
int err, i;
void *tmp;
hashmap__for_each_key_entry(map, entry, e->id) {
refs = entry->pvalue;
for (i = 0; i < refs->ref_cnt; i++) {
if (refs->refs[i].pid == e->pid)
return;
}
tmp = realloc(refs->refs, (refs->ref_cnt + 1) * sizeof(*ref));
if (!tmp) {
p_err("failed to re-alloc memory for ID %u, PID %d, COMM %s...",
e->id, e->pid, e->comm);
return;
}
refs->refs = tmp;
ref = &refs->refs[refs->ref_cnt];
ref->pid = e->pid;
memcpy(ref->comm, e->comm, sizeof(ref->comm));
refs->ref_cnt++;
return;
}
/* new ref */
refs = calloc(1, sizeof(*refs));
if (!refs) {
p_err("failed to alloc memory for ID %u, PID %d, COMM %s...",
e->id, e->pid, e->comm);
return;
}
refs->refs = malloc(sizeof(*refs->refs));
if (!refs->refs) {
free(refs);
p_err("failed to alloc memory for ID %u, PID %d, COMM %s...",
e->id, e->pid, e->comm);
return;
}
ref = &refs->refs[0];
ref->pid = e->pid;
memcpy(ref->comm, e->comm, sizeof(ref->comm));
refs->ref_cnt = 1;
refs->has_bpf_cookie = e->has_bpf_cookie;
refs->bpf_cookie = e->bpf_cookie;
err = hashmap__append(map, e->id, refs);
if (err)
p_err("failed to append entry to hashmap for ID %u: %s",
e->id, strerror(errno));
}
static int __printf(2, 0)
libbpf_print_none(__maybe_unused enum libbpf_print_level level,
__maybe_unused const char *format,
__maybe_unused va_list args)
{
return 0;
}
int build_obj_refs_table(struct hashmap **map, enum bpf_obj_type type)
{
struct pid_iter_entry *e;
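	/* The buffer below holds a whole number of entries (rounded
	 * down from 4096 bytes), so a read() from the iterator never
	 * splits a struct pid_iter_entry.
	 */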
char buf[4096 / sizeof(*e) * sizeof(*e)];
struct pid_iter_bpf *skel;
int err, ret, fd = -1, i;
libbpf_print_fn_t default_print;
*map = hashmap__new(hash_fn_for_key_as_id, equal_fn_for_key_as_id, NULL);
if (IS_ERR(*map)) {
p_err("failed to create hashmap for PID references");
return -1;
}
set_max_rlimit();
skel = pid_iter_bpf__open();
if (!skel) {
p_err("failed to open PID iterator skeleton");
return -1;
}
skel->rodata->obj_type = type;
/* we don't want output polluted with libbpf errors if bpf_iter is not
* supported
*/
default_print = libbpf_set_print(libbpf_print_none);
err = pid_iter_bpf__load(skel);
libbpf_set_print(default_print);
if (err) {
/* too bad, kernel doesn't support BPF iterators yet */
err = 0;
goto out;
}
err = pid_iter_bpf__attach(skel);
if (err) {
/* if we loaded above successfully, attach has to succeed */
p_err("failed to attach PID iterator: %d", err);
goto out;
}
fd = bpf_iter_create(bpf_link__fd(skel->links.iter));
if (fd < 0) {
err = -errno;
p_err("failed to create PID iterator session: %d", err);
goto out;
}
while (true) {
ret = read(fd, buf, sizeof(buf));
if (ret < 0) {
if (errno == EAGAIN)
continue;
err = -errno;
p_err("failed to read PID iterator output: %d", err);
goto out;
}
if (ret == 0)
break;
if (ret % sizeof(*e)) {
err = -EINVAL;
p_err("invalid PID iterator output format");
goto out;
}
ret /= sizeof(*e);
e = (void *)buf;
for (i = 0; i < ret; i++, e++) {
add_ref(*map, e);
}
}
err = 0;
out:
if (fd >= 0)
close(fd);
pid_iter_bpf__destroy(skel);
return err;
}
void delete_obj_refs_table(struct hashmap *map)
{
struct hashmap_entry *entry;
size_t bkt;
if (!map)
return;
hashmap__for_each_entry(map, entry, bkt) {
struct obj_refs *refs = entry->pvalue;
free(refs->refs);
free(refs);
}
hashmap__free(map);
}
void emit_obj_refs_json(struct hashmap *map, __u32 id,
json_writer_t *json_writer)
{
struct hashmap_entry *entry;
if (hashmap__empty(map))
return;
hashmap__for_each_key_entry(map, entry, id) {
struct obj_refs *refs = entry->pvalue;
int i;
if (refs->ref_cnt == 0)
break;
if (refs->has_bpf_cookie)
jsonw_lluint_field(json_writer, "bpf_cookie", refs->bpf_cookie);
jsonw_name(json_writer, "pids");
jsonw_start_array(json_writer);
for (i = 0; i < refs->ref_cnt; i++) {
struct obj_ref *ref = &refs->refs[i];
jsonw_start_object(json_writer);
jsonw_int_field(json_writer, "pid", ref->pid);
jsonw_string_field(json_writer, "comm", ref->comm);
jsonw_end_object(json_writer);
}
jsonw_end_array(json_writer);
break;
}
}
void emit_obj_refs_plain(struct hashmap *map, __u32 id, const char *prefix)
{
struct hashmap_entry *entry;
if (hashmap__empty(map))
return;
hashmap__for_each_key_entry(map, entry, id) {
struct obj_refs *refs = entry->pvalue;
int i;
if (refs->ref_cnt == 0)
break;
if (refs->has_bpf_cookie)
printf("\n\tbpf_cookie %llu", (unsigned long long) refs->bpf_cookie);
printf("%s", prefix);
for (i = 0; i < refs->ref_cnt; i++) {
struct obj_ref *ref = &refs->refs[i];
printf("%s%s(%d)", i == 0 ? "" : ", ", ref->comm, ref->pid);
}
break;
}
}
#endif
| linux-master | tools/bpf/bpftool/pids.c |
// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
/* Copyright (C) 2017-2018 Netronome Systems, Inc. */
#include <ctype.h>
#include <errno.h>
#include <getopt.h>
#include <linux/bpf.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <bpf/bpf.h>
#include <bpf/btf.h>
#include <bpf/hashmap.h>
#include <bpf/libbpf.h>
#include "main.h"
#define BATCH_LINE_LEN_MAX 65536
#define BATCH_ARG_NB_MAX 4096
const char *bin_name;
static int last_argc;
static char **last_argv;
static int (*last_do_help)(int argc, char **argv);
json_writer_t *json_wtr;
bool pretty_output;
bool json_output;
bool show_pinned;
bool block_mount;
bool verifier_logs;
bool relaxed_maps;
bool use_loader;
struct btf *base_btf;
struct hashmap *refs_table;
static void __noreturn clean_and_exit(int i)
{
if (json_output)
jsonw_destroy(&json_wtr);
exit(i);
}
void usage(void)
{
last_do_help(last_argc - 1, last_argv + 1);
clean_and_exit(-1);
}
static int do_help(int argc, char **argv)
{
if (json_output) {
jsonw_null(json_wtr);
return 0;
}
fprintf(stderr,
"Usage: %s [OPTIONS] OBJECT { COMMAND | help }\n"
" %s batch file FILE\n"
" %s version\n"
"\n"
" OBJECT := { prog | map | link | cgroup | perf | net | feature | btf | gen | struct_ops | iter }\n"
" " HELP_SPEC_OPTIONS " |\n"
" {-V|--version} }\n"
"",
bin_name, bin_name, bin_name);
return 0;
}
static int do_batch(int argc, char **argv);
static int do_version(int argc, char **argv);
static const struct cmd commands[] = {
{ "help", do_help },
{ "batch", do_batch },
{ "prog", do_prog },
{ "map", do_map },
{ "link", do_link },
{ "cgroup", do_cgroup },
{ "perf", do_perf },
{ "net", do_net },
{ "feature", do_feature },
{ "btf", do_btf },
{ "gen", do_gen },
{ "struct_ops", do_struct_ops },
{ "iter", do_iter },
{ "version", do_version },
{ 0 }
};
#ifndef BPFTOOL_VERSION
/* bpftool's major and minor version numbers are aligned on libbpf's. There is
* an offset of 6 for the version number, because bpftool's version was higher
* than libbpf's when we adopted this scheme. The patch number remains at 0
* for now. Set BPFTOOL_VERSION to override.
*/
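/* For example, a build against libbpf 1.2 reports bpftool v7.2.0. */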
#define BPFTOOL_MAJOR_VERSION (LIBBPF_MAJOR_VERSION + 6)
#define BPFTOOL_MINOR_VERSION LIBBPF_MINOR_VERSION
#define BPFTOOL_PATCH_VERSION 0
#endif
static void
print_feature(const char *feature, bool state, unsigned int *nb_features)
{
if (state) {
printf("%s %s", *nb_features ? "," : "", feature);
*nb_features = *nb_features + 1;
}
}
static int do_version(int argc, char **argv)
{
#ifdef HAVE_LIBBFD_SUPPORT
const bool has_libbfd = true;
#else
const bool has_libbfd = false;
#endif
#ifdef HAVE_LLVM_SUPPORT
const bool has_llvm = true;
#else
const bool has_llvm = false;
#endif
#ifdef BPFTOOL_WITHOUT_SKELETONS
const bool has_skeletons = false;
#else
const bool has_skeletons = true;
#endif
bool bootstrap = false;
int i;
for (i = 0; commands[i].cmd; i++) {
if (!strcmp(commands[i].cmd, "prog")) {
/* Assume we run a bootstrap version if "bpftool prog"
* is not available.
*/
bootstrap = !commands[i].func;
break;
}
}
if (json_output) {
jsonw_start_object(json_wtr); /* root object */
jsonw_name(json_wtr, "version");
#ifdef BPFTOOL_VERSION
jsonw_printf(json_wtr, "\"%s\"", BPFTOOL_VERSION);
#else
jsonw_printf(json_wtr, "\"%d.%d.%d\"", BPFTOOL_MAJOR_VERSION,
BPFTOOL_MINOR_VERSION, BPFTOOL_PATCH_VERSION);
#endif
jsonw_name(json_wtr, "libbpf_version");
jsonw_printf(json_wtr, "\"%d.%d\"",
libbpf_major_version(), libbpf_minor_version());
jsonw_name(json_wtr, "features");
jsonw_start_object(json_wtr); /* features */
jsonw_bool_field(json_wtr, "libbfd", has_libbfd);
jsonw_bool_field(json_wtr, "llvm", has_llvm);
jsonw_bool_field(json_wtr, "skeletons", has_skeletons);
jsonw_bool_field(json_wtr, "bootstrap", bootstrap);
jsonw_end_object(json_wtr); /* features */
jsonw_end_object(json_wtr); /* root object */
} else {
unsigned int nb_features = 0;
#ifdef BPFTOOL_VERSION
printf("%s v%s\n", bin_name, BPFTOOL_VERSION);
#else
printf("%s v%d.%d.%d\n", bin_name, BPFTOOL_MAJOR_VERSION,
BPFTOOL_MINOR_VERSION, BPFTOOL_PATCH_VERSION);
#endif
printf("using libbpf %s\n", libbpf_version_string());
printf("features:");
print_feature("libbfd", has_libbfd, &nb_features);
print_feature("llvm", has_llvm, &nb_features);
print_feature("skeletons", has_skeletons, &nb_features);
print_feature("bootstrap", bootstrap, &nb_features);
printf("\n");
}
return 0;
}
int cmd_select(const struct cmd *cmds, int argc, char **argv,
int (*help)(int argc, char **argv))
{
unsigned int i;
last_argc = argc;
last_argv = argv;
last_do_help = help;
if (argc < 1 && cmds[0].func)
return cmds[0].func(argc, argv);
for (i = 0; cmds[i].cmd; i++) {
if (is_prefix(*argv, cmds[i].cmd)) {
if (!cmds[i].func) {
p_err("command '%s' is not supported in bootstrap mode",
cmds[i].cmd);
return -1;
}
return cmds[i].func(argc - 1, argv + 1);
}
}
help(argc - 1, argv + 1);
return -1;
}
bool is_prefix(const char *pfx, const char *str)
{
if (!pfx)
return false;
if (strlen(str) < strlen(pfx))
return false;
return !memcmp(str, pfx, strlen(pfx));
}
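/* e.g. is_prefix("sh", "show") is true: this is what lets users
 * abbreviate keywords, so "bpftool prog sh" resolves to "show".
 */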
/* Last argument MUST be NULL pointer */
int detect_common_prefix(const char *arg, ...)
{
unsigned int count = 0;
const char *ref;
char msg[256];
va_list ap;
snprintf(msg, sizeof(msg), "ambiguous prefix: '%s' could be '", arg);
va_start(ap, arg);
while ((ref = va_arg(ap, const char *))) {
if (!is_prefix(arg, ref))
continue;
count++;
if (count > 1)
strncat(msg, "' or '", sizeof(msg) - strlen(msg) - 1);
strncat(msg, ref, sizeof(msg) - strlen(msg) - 1);
}
va_end(ap);
strncat(msg, "'", sizeof(msg) - strlen(msg) - 1);
if (count >= 2) {
p_err("%s", msg);
return -1;
}
return 0;
}
void fprint_hex(FILE *f, void *arg, unsigned int n, const char *sep)
{
unsigned char *data = arg;
unsigned int i;
for (i = 0; i < n; i++) {
const char *pfx = "";
if (!i)
/* nothing */;
else if (!(i % 16))
fprintf(f, "\n");
else if (!(i % 8))
fprintf(f, " ");
else
pfx = sep;
fprintf(f, "%s%02hhx", i ? pfx : "", data[i]);
}
}
/* Split command line into argument vector. */
static int make_args(char *line, char *n_argv[], int maxargs, int cmd_nb)
{
static const char ws[] = " \t\r\n";
char *cp = line;
int n_argc = 0;
while (*cp) {
/* Skip leading whitespace. */
cp += strspn(cp, ws);
if (*cp == '\0')
break;
if (n_argc >= (maxargs - 1)) {
p_err("too many arguments to command %d", cmd_nb);
return -1;
}
/* Word begins with quote. */
if (*cp == '\'' || *cp == '"') {
char quote = *cp++;
n_argv[n_argc++] = cp;
/* Find ending quote. */
cp = strchr(cp, quote);
if (!cp) {
p_err("unterminated quoted string in command %d",
cmd_nb);
return -1;
}
} else {
n_argv[n_argc++] = cp;
/* Find end of word. */
cp += strcspn(cp, ws);
if (*cp == '\0')
break;
}
/* Separate words. */
*cp++ = 0;
}
n_argv[n_argc] = NULL;
return n_argc;
}
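/* Example for make_args() above: the batch line
 *   prog load "my prog.o" /sys/fs/bpf/p
 * splits into { "prog", "load", "my prog.o", "/sys/fs/bpf/p" },
 * the quoted argument staying a single word.
 */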
static int do_batch(int argc, char **argv)
{
char buf[BATCH_LINE_LEN_MAX], contline[BATCH_LINE_LEN_MAX];
char *n_argv[BATCH_ARG_NB_MAX];
unsigned int lines = 0;
int n_argc;
FILE *fp;
char *cp;
int err = 0;
int i;
if (argc < 2) {
p_err("too few parameters for batch");
return -1;
} else if (argc > 2) {
p_err("too many parameters for batch");
return -1;
} else if (!is_prefix(*argv, "file")) {
p_err("expected 'file', got: %s", *argv);
return -1;
}
NEXT_ARG();
if (!strcmp(*argv, "-"))
fp = stdin;
else
fp = fopen(*argv, "r");
if (!fp) {
p_err("Can't open file (%s): %s", *argv, strerror(errno));
return -1;
}
if (json_output)
jsonw_start_array(json_wtr);
while (fgets(buf, sizeof(buf), fp)) {
cp = strchr(buf, '#');
if (cp)
*cp = '\0';
if (strlen(buf) == sizeof(buf) - 1) {
errno = E2BIG;
break;
}
/* Append continuation lines if any (coming after a line ending
* with '\' in the batch file).
*/
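		/* e.g. a line "map update id 7 \" followed by
		 * "key 0 0 0 0 value 1" is merged into one command
		 * before being split into arguments.
		 */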
while ((cp = strstr(buf, "\\\n")) != NULL) {
if (!fgets(contline, sizeof(contline), fp) ||
strlen(contline) == 0) {
p_err("missing continuation line on command %d",
lines);
err = -1;
goto err_close;
}
cp = strchr(contline, '#');
if (cp)
*cp = '\0';
if (strlen(buf) + strlen(contline) + 1 > sizeof(buf)) {
p_err("command %d is too long", lines);
err = -1;
goto err_close;
}
buf[strlen(buf) - 2] = '\0';
strcat(buf, contline);
}
n_argc = make_args(buf, n_argv, BATCH_ARG_NB_MAX, lines);
if (!n_argc)
continue;
if (n_argc < 0) {
err = n_argc;
goto err_close;
}
if (json_output) {
jsonw_start_object(json_wtr);
jsonw_name(json_wtr, "command");
jsonw_start_array(json_wtr);
for (i = 0; i < n_argc; i++)
jsonw_string(json_wtr, n_argv[i]);
jsonw_end_array(json_wtr);
jsonw_name(json_wtr, "output");
}
err = cmd_select(commands, n_argc, n_argv, do_help);
if (json_output)
jsonw_end_object(json_wtr);
if (err)
goto err_close;
lines++;
}
if (errno && errno != ENOENT) {
p_err("reading batch file failed: %s", strerror(errno));
err = -1;
} else {
if (!json_output)
printf("processed %d commands\n", lines);
}
err_close:
if (fp != stdin)
fclose(fp);
if (json_output)
jsonw_end_array(json_wtr);
return err;
}
int main(int argc, char **argv)
{
static const struct option options[] = {
{ "json", no_argument, NULL, 'j' },
{ "help", no_argument, NULL, 'h' },
{ "pretty", no_argument, NULL, 'p' },
{ "version", no_argument, NULL, 'V' },
{ "bpffs", no_argument, NULL, 'f' },
{ "mapcompat", no_argument, NULL, 'm' },
{ "nomount", no_argument, NULL, 'n' },
{ "debug", no_argument, NULL, 'd' },
{ "use-loader", no_argument, NULL, 'L' },
{ "base-btf", required_argument, NULL, 'B' },
{ 0 }
};
bool version_requested = false;
int opt, ret;
setlinebuf(stdout);
#ifdef USE_LIBCAP
/* Libcap < 2.63 hooks before main() to compute the number of
* capabilities of the running kernel, and doing so it calls prctl()
* which may fail and set errno to non-zero.
* Let's reset errno to make sure this does not interfere with the
* batch mode.
*/
errno = 0;
#endif
last_do_help = do_help;
pretty_output = false;
json_output = false;
show_pinned = false;
block_mount = false;
bin_name = "bpftool";
opterr = 0;
while ((opt = getopt_long(argc, argv, "VhpjfLmndB:l",
options, NULL)) >= 0) {
switch (opt) {
case 'V':
version_requested = true;
break;
case 'h':
return do_help(argc, argv);
case 'p':
pretty_output = true;
/* fall through */
case 'j':
if (!json_output) {
json_wtr = jsonw_new(stdout);
if (!json_wtr) {
p_err("failed to create JSON writer");
return -1;
}
json_output = true;
}
jsonw_pretty(json_wtr, pretty_output);
break;
case 'f':
show_pinned = true;
break;
case 'm':
relaxed_maps = true;
break;
case 'n':
block_mount = true;
break;
case 'd':
libbpf_set_print(print_all_levels);
verifier_logs = true;
break;
case 'B':
base_btf = btf__parse(optarg, NULL);
if (!base_btf) {
p_err("failed to parse base BTF at '%s': %d\n",
optarg, -errno);
return -1;
}
break;
case 'L':
use_loader = true;
break;
default:
p_err("unrecognized option '%s'", argv[optind - 1]);
if (json_output)
clean_and_exit(-1);
else
usage();
}
}
argc -= optind;
argv += optind;
if (argc < 0)
usage();
if (version_requested)
return do_version(argc, argv);
ret = cmd_select(commands, argc, argv, do_help);
if (json_output)
jsonw_destroy(&json_wtr);
btf__free(base_btf);
return ret;
}
| linux-master | tools/bpf/bpftool/main.c |
// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
/* Copyright (C) 2020 Facebook */
#include <errno.h>
#include <stdio.h>
#include <unistd.h>
#include <linux/err.h>
#include <bpf/bpf.h>
#include <bpf/btf.h>
#include <bpf/libbpf.h>
#include "json_writer.h"
#include "main.h"
#define STRUCT_OPS_VALUE_PREFIX "bpf_struct_ops_"
static const struct btf_type *map_info_type;
static __u32 map_info_alloc_len;
static struct btf *btf_vmlinux;
static __s32 map_info_type_id;
struct res {
unsigned int nr_maps;
unsigned int nr_errs;
};
static const struct btf *get_btf_vmlinux(void)
{
if (btf_vmlinux)
return btf_vmlinux;
btf_vmlinux = libbpf_find_kernel_btf();
if (!btf_vmlinux)
p_err("struct_ops requires kernel CONFIG_DEBUG_INFO_BTF=y");
return btf_vmlinux;
}
static const char *get_kern_struct_ops_name(const struct bpf_map_info *info)
{
const struct btf *kern_btf;
const struct btf_type *t;
const char *st_ops_name;
kern_btf = get_btf_vmlinux();
if (!kern_btf)
return "<btf_vmlinux_not_found>";
t = btf__type_by_id(kern_btf, info->btf_vmlinux_value_type_id);
st_ops_name = btf__name_by_offset(kern_btf, t->name_off);
st_ops_name += strlen(STRUCT_OPS_VALUE_PREFIX);
return st_ops_name;
}
static __s32 get_map_info_type_id(void)
{
const struct btf *kern_btf;
if (map_info_type_id)
return map_info_type_id;
kern_btf = get_btf_vmlinux();
if (!kern_btf)
return 0;
map_info_type_id = btf__find_by_name_kind(kern_btf, "bpf_map_info",
BTF_KIND_STRUCT);
if (map_info_type_id < 0) {
p_err("can't find bpf_map_info from btf_vmlinux");
return map_info_type_id;
}
map_info_type = btf__type_by_id(kern_btf, map_info_type_id);
/* Ensure map_info_alloc() has at least what the bpftool needs */
map_info_alloc_len = map_info_type->size;
if (map_info_alloc_len < sizeof(struct bpf_map_info))
map_info_alloc_len = sizeof(struct bpf_map_info);
return map_info_type_id;
}
/* If a subcmd needs to print out the bpf_map_info, it should always
 * call map_info_alloc() to allocate a bpf_map_info object instead of
 * allocating it on the stack.
 *
 * map_info_alloc() takes the running kernel's btf into account, i.e.
 * it uses the sizeof(struct bpf_map_info) of the running kernel.
 *
 * This enables the "struct_ops" cmd to print the latest
 * "struct bpf_map_info".
 *
 * [ Recall that "struct_ops" requires the kernel's btf to
 *   be available ]
 */
static struct bpf_map_info *map_info_alloc(__u32 *alloc_len)
{
struct bpf_map_info *info;
if (get_map_info_type_id() < 0)
return NULL;
info = calloc(1, map_info_alloc_len);
if (!info)
p_err("mem alloc failed");
else
*alloc_len = map_info_alloc_len;
return info;
}
/* Iterate over all struct_ops maps in the system.
 * The fd is returned in "*res_fd" and the map info in "*info".
 * On the very first iteration, info->id must be 0.
 * An optional map "*name" filter can be specified.
 * The filter could be made more flexible in the future,
 * e.g. filtering by kernel-struct-ops-name, regex or glob.
 *
 * Return value:
 * 1: A struct_ops map was found and returned in "*res_fd" and "*info".
 *    The caller may keep calling this function to get the next one.
 * 0: No struct_ops map is returned;
 *    all struct_ops maps have been found.
 * -1: Error; the caller should abort the iteration.
 */
static int get_next_struct_ops_map(const char *name, int *res_fd,
struct bpf_map_info *info, __u32 info_len)
{
__u32 id = info->id;
int err, fd;
while (true) {
err = bpf_map_get_next_id(id, &id);
if (err) {
if (errno == ENOENT)
return 0;
p_err("can't get next map: %s", strerror(errno));
return -1;
}
fd = bpf_map_get_fd_by_id(id);
if (fd < 0) {
if (errno == ENOENT)
continue;
p_err("can't get map by id (%u): %s",
id, strerror(errno));
return -1;
}
err = bpf_map_get_info_by_fd(fd, info, &info_len);
if (err) {
p_err("can't get map info: %s", strerror(errno));
close(fd);
return -1;
}
if (info->type == BPF_MAP_TYPE_STRUCT_OPS &&
(!name || !strcmp(name, info->name))) {
*res_fd = fd;
return 1;
}
close(fd);
}
}
static int cmd_retval(const struct res *res, bool must_have_one_map)
{
if (res->nr_errs || (!res->nr_maps && must_have_one_map))
return -1;
return 0;
}
/* "data" is the work_func private storage */
typedef int (*work_func)(int fd, const struct bpf_map_info *info, void *data,
struct json_writer *wtr);
/* Find all struct_ops map in the system.
* Filter out by "name" (if specified).
* Then call "func(fd, info, data, wtr)" on each struct_ops map found.
*/
static struct res do_search(const char *name, work_func func, void *data,
struct json_writer *wtr)
{
struct bpf_map_info *info;
struct res res = {};
__u32 info_len;
int fd, err;
info = map_info_alloc(&info_len);
if (!info) {
res.nr_errs++;
return res;
}
if (wtr)
jsonw_start_array(wtr);
while ((err = get_next_struct_ops_map(name, &fd, info, info_len)) == 1) {
res.nr_maps++;
err = func(fd, info, data, wtr);
if (err)
res.nr_errs++;
close(fd);
}
if (wtr)
jsonw_end_array(wtr);
if (err)
res.nr_errs++;
if (!wtr && name && !res.nr_errs && !res.nr_maps)
		/* Nothing (not even an empty []) has been printed, so
		 * explicitly report that no struct_ops was found for
		 * "name" here.
		 */
p_err("no struct_ops found for %s", name);
else if (!wtr && json_output && !res.nr_errs)
/* The "func()" above is not writing any json (i.e. !wtr
* test here).
*
* However, "-j" is enabled and there is no errs here,
* so call json_null() as the current convention of
* other cmds.
*/
jsonw_null(json_wtr);
free(info);
return res;
}
static struct res do_one_id(const char *id_str, work_func func, void *data,
struct json_writer *wtr)
{
struct bpf_map_info *info;
struct res res = {};
unsigned long id;
__u32 info_len;
char *endptr;
int fd;
id = strtoul(id_str, &endptr, 0);
if (*endptr || !id || id > UINT32_MAX) {
p_err("invalid id %s", id_str);
res.nr_errs++;
return res;
}
fd = bpf_map_get_fd_by_id(id);
if (fd < 0) {
p_err("can't get map by id (%lu): %s", id, strerror(errno));
res.nr_errs++;
return res;
}
info = map_info_alloc(&info_len);
if (!info) {
res.nr_errs++;
goto done;
}
if (bpf_map_get_info_by_fd(fd, info, &info_len)) {
p_err("can't get map info: %s", strerror(errno));
res.nr_errs++;
goto done;
}
if (info->type != BPF_MAP_TYPE_STRUCT_OPS) {
p_err("%s id %u is not a struct_ops map", info->name, info->id);
res.nr_errs++;
goto done;
}
res.nr_maps++;
if (func(fd, info, data, wtr))
res.nr_errs++;
else if (!wtr && json_output)
/* The "func()" above is not writing any json (i.e. !wtr
* test here).
*
* However, "-j" is enabled and there is no errs here,
* so call json_null() as the current convention of
* other cmds.
*/
jsonw_null(json_wtr);
done:
free(info);
close(fd);
return res;
}
static struct res do_work_on_struct_ops(const char *search_type,
const char *search_term,
work_func func, void *data,
struct json_writer *wtr)
{
if (search_type) {
if (is_prefix(search_type, "id"))
return do_one_id(search_term, func, data, wtr);
else if (!is_prefix(search_type, "name"))
usage();
}
return do_search(search_term, func, data, wtr);
}
static int __do_show(int fd, const struct bpf_map_info *info, void *data,
struct json_writer *wtr)
{
if (wtr) {
jsonw_start_object(wtr);
jsonw_uint_field(wtr, "id", info->id);
jsonw_string_field(wtr, "name", info->name);
jsonw_string_field(wtr, "kernel_struct_ops",
get_kern_struct_ops_name(info));
jsonw_end_object(wtr);
} else {
printf("%u: %-15s %-32s\n", info->id, info->name,
get_kern_struct_ops_name(info));
}
return 0;
}
static int do_show(int argc, char **argv)
{
const char *search_type = NULL, *search_term = NULL;
struct res res;
if (argc && argc != 2)
usage();
if (argc == 2) {
search_type = GET_ARG();
search_term = GET_ARG();
}
res = do_work_on_struct_ops(search_type, search_term, __do_show,
NULL, json_wtr);
return cmd_retval(&res, !!search_term);
}
static int __do_dump(int fd, const struct bpf_map_info *info, void *data,
struct json_writer *wtr)
{
struct btf_dumper *d = (struct btf_dumper *)data;
const struct btf_type *struct_ops_type;
const struct btf *kern_btf = d->btf;
const char *struct_ops_name;
int zero = 0;
void *value;
	/* note: d->jw == wtr */
	/* A kernel that supports BPF_MAP_TYPE_STRUCT_OPS must provide
	 * btf_vmlinux_value_type_id.
	 */
struct_ops_type = btf__type_by_id(kern_btf,
info->btf_vmlinux_value_type_id);
struct_ops_name = btf__name_by_offset(kern_btf,
struct_ops_type->name_off);
value = calloc(1, info->value_size);
if (!value) {
p_err("mem alloc failed");
return -1;
}
if (bpf_map_lookup_elem(fd, &zero, value)) {
p_err("can't lookup struct_ops map %s id %u",
info->name, info->id);
free(value);
return -1;
}
jsonw_start_object(wtr);
jsonw_name(wtr, "bpf_map_info");
btf_dumper_type(d, map_info_type_id, (void *)info);
jsonw_end_object(wtr);
jsonw_start_object(wtr);
jsonw_name(wtr, struct_ops_name);
btf_dumper_type(d, info->btf_vmlinux_value_type_id, value);
jsonw_end_object(wtr);
free(value);
return 0;
}
static int do_dump(int argc, char **argv)
{
const char *search_type = NULL, *search_term = NULL;
json_writer_t *wtr = json_wtr;
const struct btf *kern_btf;
struct btf_dumper d = {};
struct res res;
if (argc && argc != 2)
usage();
if (argc == 2) {
search_type = GET_ARG();
search_term = GET_ARG();
}
kern_btf = get_btf_vmlinux();
if (!kern_btf)
return -1;
if (!json_output) {
wtr = jsonw_new(stdout);
if (!wtr) {
p_err("can't create json writer");
return -1;
}
jsonw_pretty(wtr, true);
}
d.btf = kern_btf;
d.jw = wtr;
d.is_plain_text = !json_output;
d.prog_id_as_func_ptr = true;
res = do_work_on_struct_ops(search_type, search_term, __do_dump, &d,
wtr);
if (!json_output)
jsonw_destroy(&wtr);
return cmd_retval(&res, !!search_term);
}
static int __do_unregister(int fd, const struct bpf_map_info *info, void *data,
struct json_writer *wtr)
{
int zero = 0;
if (bpf_map_delete_elem(fd, &zero)) {
p_err("can't unload %s %s id %u: %s",
get_kern_struct_ops_name(info), info->name,
info->id, strerror(errno));
return -1;
}
p_info("Unregistered %s %s id %u",
get_kern_struct_ops_name(info), info->name,
info->id);
return 0;
}
static int do_unregister(int argc, char **argv)
{
const char *search_type, *search_term;
struct res res;
if (argc != 2)
usage();
search_type = GET_ARG();
search_term = GET_ARG();
res = do_work_on_struct_ops(search_type, search_term,
__do_unregister, NULL, NULL);
return cmd_retval(&res, true);
}
static int pin_link(struct bpf_link *link, const char *pindir,
const char *name)
{
char pinfile[PATH_MAX];
int err;
err = pathname_concat(pinfile, sizeof(pinfile), pindir, name);
if (err)
return -1;
return bpf_link__pin(link, pinfile);
}
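/* e.g. "bpftool struct_ops register bpf_dctcp.o" (illustrative
 * object name) registers every struct_ops map found in the object;
 * with a LINK_DIR argument, the resulting links are also pinned
 * there.
 */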
static int do_register(int argc, char **argv)
{
LIBBPF_OPTS(bpf_object_open_opts, open_opts);
__u32 link_info_len = sizeof(struct bpf_link_info);
struct bpf_link_info link_info = {};
struct bpf_map_info info = {};
__u32 info_len = sizeof(info);
int nr_errs = 0, nr_maps = 0;
const char *linkdir = NULL;
struct bpf_object *obj;
struct bpf_link *link;
struct bpf_map *map;
const char *file;
if (argc != 1 && argc != 2)
usage();
file = GET_ARG();
if (argc == 1)
linkdir = GET_ARG();
if (linkdir && mount_bpffs_for_pin(linkdir, true)) {
p_err("can't mount bpffs for pinning");
return -1;
}
if (verifier_logs)
/* log_level1 + log_level2 + stats, but not stable UAPI */
open_opts.kernel_log_level = 1 + 2 + 4;
obj = bpf_object__open_file(file, &open_opts);
if (!obj)
return -1;
set_max_rlimit();
if (bpf_object__load(obj)) {
bpf_object__close(obj);
return -1;
}
bpf_object__for_each_map(map, obj) {
if (bpf_map__type(map) != BPF_MAP_TYPE_STRUCT_OPS)
continue;
link = bpf_map__attach_struct_ops(map);
if (!link) {
p_err("can't register struct_ops %s: %s",
bpf_map__name(map), strerror(errno));
nr_errs++;
continue;
}
nr_maps++;
if (bpf_map_get_info_by_fd(bpf_map__fd(map), &info,
&info_len)) {
/* Not p_err. The struct_ops was attached
* successfully.
*/
p_info("Registered %s but can't find id: %s",
bpf_map__name(map), strerror(errno));
goto clean_link;
}
if (!(bpf_map__map_flags(map) & BPF_F_LINK)) {
p_info("Registered %s %s id %u",
get_kern_struct_ops_name(&info),
info.name,
info.id);
goto clean_link;
}
if (bpf_link_get_info_by_fd(bpf_link__fd(link),
&link_info,
&link_info_len)) {
p_err("Registered %s but can't find link id: %s",
bpf_map__name(map), strerror(errno));
nr_errs++;
goto clean_link;
}
if (linkdir && pin_link(link, linkdir, info.name)) {
p_err("can't pin link %u for %s: %s",
link_info.id, info.name,
strerror(errno));
nr_errs++;
goto clean_link;
}
p_info("Registered %s %s map id %u link id %u",
get_kern_struct_ops_name(&info),
info.name, info.id, link_info.id);
clean_link:
bpf_link__disconnect(link);
bpf_link__destroy(link);
}
bpf_object__close(obj);
if (nr_errs)
return -1;
if (!nr_maps) {
p_err("no struct_ops found in %s", file);
return -1;
}
if (json_output)
jsonw_null(json_wtr);
return 0;
}
static int do_help(int argc, char **argv)
{
if (json_output) {
jsonw_null(json_wtr);
return 0;
}
fprintf(stderr,
"Usage: %1$s %2$s { show | list } [STRUCT_OPS_MAP]\n"
" %1$s %2$s dump [STRUCT_OPS_MAP]\n"
" %1$s %2$s register OBJ [LINK_DIR]\n"
" %1$s %2$s unregister STRUCT_OPS_MAP\n"
" %1$s %2$s help\n"
"\n"
" STRUCT_OPS_MAP := [ id STRUCT_OPS_MAP_ID | name STRUCT_OPS_MAP_NAME ]\n"
" " HELP_SPEC_OPTIONS " }\n"
"",
bin_name, argv[-2]);
return 0;
}
static const struct cmd cmds[] = {
{ "show", do_show },
{ "list", do_show },
{ "register", do_register },
{ "unregister", do_unregister },
{ "dump", do_dump },
{ "help", do_help },
{ 0 }
};
int do_struct_ops(int argc, char **argv)
{
int err;
err = cmd_select(cmds, argc, argv, do_help);
btf__free(btf_vmlinux);
return err;
}
| linux-master | tools/bpf/bpftool/struct_ops.c |
// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
/* Copyright (C) 2017-2018 Netronome Systems, Inc. */
#ifndef _GNU_SOURCE
#define _GNU_SOURCE
#endif
#include <errno.h>
#include <fcntl.h>
#include <signal.h>
#include <stdarg.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>
#include <unistd.h>
#include <net/if.h>
#include <sys/ioctl.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <sys/syscall.h>
#include <dirent.h>
#include <linux/err.h>
#include <linux/perf_event.h>
#include <linux/sizes.h>
#include <bpf/bpf.h>
#include <bpf/btf.h>
#include <bpf/hashmap.h>
#include <bpf/libbpf.h>
#include <bpf/libbpf_internal.h>
#include <bpf/skel_internal.h>
#include "cfg.h"
#include "main.h"
#include "xlated_dumper.h"
#define BPF_METADATA_PREFIX "bpf_metadata_"
#define BPF_METADATA_PREFIX_LEN (sizeof(BPF_METADATA_PREFIX) - 1)
enum dump_mode {
DUMP_JITED,
DUMP_XLATED,
};
static const bool attach_types[] = {
[BPF_SK_SKB_STREAM_PARSER] = true,
[BPF_SK_SKB_STREAM_VERDICT] = true,
[BPF_SK_SKB_VERDICT] = true,
[BPF_SK_MSG_VERDICT] = true,
[BPF_FLOW_DISSECTOR] = true,
[__MAX_BPF_ATTACH_TYPE] = false,
};
/* Textual representations traditionally used by the program and kept around
* for the sake of backwards compatibility.
*/
static const char * const attach_type_strings[] = {
[BPF_SK_SKB_STREAM_PARSER] = "stream_parser",
[BPF_SK_SKB_STREAM_VERDICT] = "stream_verdict",
[BPF_SK_SKB_VERDICT] = "skb_verdict",
[BPF_SK_MSG_VERDICT] = "msg_verdict",
[__MAX_BPF_ATTACH_TYPE] = NULL,
};
static struct hashmap *prog_table;
static enum bpf_attach_type parse_attach_type(const char *str)
{
enum bpf_attach_type type;
for (type = 0; type < __MAX_BPF_ATTACH_TYPE; type++) {
if (attach_types[type]) {
const char *attach_type_str;
attach_type_str = libbpf_bpf_attach_type_str(type);
if (!strcmp(str, attach_type_str))
return type;
}
if (attach_type_strings[type] &&
is_prefix(str, attach_type_strings[type]))
return type;
}
return __MAX_BPF_ATTACH_TYPE;
}
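/* Lay out a single buffer (*info_data) large enough for all the
 * variable-length members that the requested dump mode needs, and
 * point the bpf_prog_info fields at slices of it; the caller then
 * issues a second bpf_prog_get_info_by_fd() to fill them in.
 */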
static int prep_prog_info(struct bpf_prog_info *const info, enum dump_mode mode,
void **info_data, size_t *const info_data_sz)
{
struct bpf_prog_info holder = {};
size_t needed = 0;
void *ptr;
if (mode == DUMP_JITED) {
holder.jited_prog_len = info->jited_prog_len;
needed += info->jited_prog_len;
} else {
holder.xlated_prog_len = info->xlated_prog_len;
needed += info->xlated_prog_len;
}
holder.nr_jited_ksyms = info->nr_jited_ksyms;
needed += info->nr_jited_ksyms * sizeof(__u64);
holder.nr_jited_func_lens = info->nr_jited_func_lens;
needed += info->nr_jited_func_lens * sizeof(__u32);
holder.nr_func_info = info->nr_func_info;
holder.func_info_rec_size = info->func_info_rec_size;
needed += info->nr_func_info * info->func_info_rec_size;
holder.nr_line_info = info->nr_line_info;
holder.line_info_rec_size = info->line_info_rec_size;
needed += info->nr_line_info * info->line_info_rec_size;
holder.nr_jited_line_info = info->nr_jited_line_info;
holder.jited_line_info_rec_size = info->jited_line_info_rec_size;
needed += info->nr_jited_line_info * info->jited_line_info_rec_size;
if (needed > *info_data_sz) {
ptr = realloc(*info_data, needed);
if (!ptr)
return -1;
*info_data = ptr;
*info_data_sz = needed;
}
ptr = *info_data;
if (mode == DUMP_JITED) {
holder.jited_prog_insns = ptr_to_u64(ptr);
ptr += holder.jited_prog_len;
} else {
holder.xlated_prog_insns = ptr_to_u64(ptr);
ptr += holder.xlated_prog_len;
}
holder.jited_ksyms = ptr_to_u64(ptr);
ptr += holder.nr_jited_ksyms * sizeof(__u64);
holder.jited_func_lens = ptr_to_u64(ptr);
ptr += holder.nr_jited_func_lens * sizeof(__u32);
holder.func_info = ptr_to_u64(ptr);
ptr += holder.nr_func_info * holder.func_info_rec_size;
holder.line_info = ptr_to_u64(ptr);
ptr += holder.nr_line_info * holder.line_info_rec_size;
holder.jited_line_info = ptr_to_u64(ptr);
ptr += holder.nr_jited_line_info * holder.jited_line_info_rec_size;
*info = holder;
return 0;
}
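/* Translate a load time given in nanoseconds since boot
 * (CLOCK_BOOTTIME) into wall-clock text:
 *   wallclock = now(CLOCK_REALTIME) - now(CLOCK_BOOTTIME) + nsecs
 */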
static void print_boot_time(__u64 nsecs, char *buf, unsigned int size)
{
struct timespec real_time_ts, boot_time_ts;
time_t wallclock_secs;
struct tm load_tm;
buf[--size] = '\0';
if (clock_gettime(CLOCK_REALTIME, &real_time_ts) ||
clock_gettime(CLOCK_BOOTTIME, &boot_time_ts)) {
perror("Can't read clocks");
snprintf(buf, size, "%llu", nsecs / 1000000000);
return;
}
wallclock_secs = (real_time_ts.tv_sec - boot_time_ts.tv_sec) +
(real_time_ts.tv_nsec - boot_time_ts.tv_nsec + nsecs) /
1000000000;
if (!localtime_r(&wallclock_secs, &load_tm)) {
snprintf(buf, size, "%llu", nsecs / 1000000000);
return;
}
if (json_output)
strftime(buf, size, "%s", &load_tm);
else
strftime(buf, size, "%FT%T%z", &load_tm);
}
static void show_prog_maps(int fd, __u32 num_maps)
{
struct bpf_prog_info info = {};
__u32 len = sizeof(info);
__u32 map_ids[num_maps];
unsigned int i;
int err;
info.nr_map_ids = num_maps;
info.map_ids = ptr_to_u64(map_ids);
err = bpf_prog_get_info_by_fd(fd, &info, &len);
if (err || !info.nr_map_ids)
return;
if (json_output) {
jsonw_name(json_wtr, "map_ids");
jsonw_start_array(json_wtr);
for (i = 0; i < info.nr_map_ids; i++)
jsonw_uint(json_wtr, map_ids[i]);
jsonw_end_array(json_wtr);
} else {
printf(" map_ids ");
for (i = 0; i < info.nr_map_ids; i++)
printf("%u%s", map_ids[i],
i == info.nr_map_ids - 1 ? "" : ",");
}
}
static void *find_metadata(int prog_fd, struct bpf_map_info *map_info)
{
struct bpf_prog_info prog_info;
__u32 prog_info_len;
__u32 map_info_len;
void *value = NULL;
__u32 *map_ids;
int nr_maps;
int key = 0;
int map_fd;
int ret;
__u32 i;
memset(&prog_info, 0, sizeof(prog_info));
prog_info_len = sizeof(prog_info);
ret = bpf_prog_get_info_by_fd(prog_fd, &prog_info, &prog_info_len);
if (ret)
return NULL;
if (!prog_info.nr_map_ids)
return NULL;
map_ids = calloc(prog_info.nr_map_ids, sizeof(__u32));
if (!map_ids)
return NULL;
nr_maps = prog_info.nr_map_ids;
memset(&prog_info, 0, sizeof(prog_info));
prog_info.nr_map_ids = nr_maps;
prog_info.map_ids = ptr_to_u64(map_ids);
prog_info_len = sizeof(prog_info);
ret = bpf_prog_get_info_by_fd(prog_fd, &prog_info, &prog_info_len);
if (ret)
goto free_map_ids;
for (i = 0; i < prog_info.nr_map_ids; i++) {
map_fd = bpf_map_get_fd_by_id(map_ids[i]);
if (map_fd < 0)
goto free_map_ids;
memset(map_info, 0, sizeof(*map_info));
map_info_len = sizeof(*map_info);
ret = bpf_map_get_info_by_fd(map_fd, map_info, &map_info_len);
if (ret < 0) {
close(map_fd);
goto free_map_ids;
}
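		/* Program metadata lives in the skeleton's .rodata map:
		 * a single-element array keyed by an int, with BTF
		 * describing its value. Skip any other map.
		 */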
if (map_info->type != BPF_MAP_TYPE_ARRAY ||
map_info->key_size != sizeof(int) ||
map_info->max_entries != 1 ||
!map_info->btf_value_type_id ||
!strstr(map_info->name, ".rodata")) {
close(map_fd);
continue;
}
value = malloc(map_info->value_size);
if (!value) {
close(map_fd);
goto free_map_ids;
}
if (bpf_map_lookup_elem(map_fd, &key, value)) {
close(map_fd);
free(value);
value = NULL;
goto free_map_ids;
}
close(map_fd);
break;
}
free_map_ids:
free(map_ids);
return value;
}
static bool has_metadata_prefix(const char *s)
{
return strncmp(s, BPF_METADATA_PREFIX, BPF_METADATA_PREFIX_LEN) == 0;
}
static void show_prog_metadata(int fd, __u32 num_maps)
{
const struct btf_type *t_datasec, *t_var;
struct bpf_map_info map_info;
struct btf_var_secinfo *vsi;
bool printed_header = false;
unsigned int i, vlen;
void *value = NULL;
const char *name;
struct btf *btf;
int err;
if (!num_maps)
return;
memset(&map_info, 0, sizeof(map_info));
value = find_metadata(fd, &map_info);
if (!value)
return;
btf = btf__load_from_kernel_by_id(map_info.btf_id);
if (!btf)
goto out_free;
t_datasec = btf__type_by_id(btf, map_info.btf_value_type_id);
if (!btf_is_datasec(t_datasec))
goto out_free;
vlen = btf_vlen(t_datasec);
vsi = btf_var_secinfos(t_datasec);
/* We don't proceed to check the kinds of the elements of the DATASEC.
* The verifier enforces them to be BTF_KIND_VAR.
*/
if (json_output) {
struct btf_dumper d = {
.btf = btf,
.jw = json_wtr,
.is_plain_text = false,
};
for (i = 0; i < vlen; i++, vsi++) {
t_var = btf__type_by_id(btf, vsi->type);
name = btf__name_by_offset(btf, t_var->name_off);
if (!has_metadata_prefix(name))
continue;
if (!printed_header) {
jsonw_name(json_wtr, "metadata");
jsonw_start_object(json_wtr);
printed_header = true;
}
jsonw_name(json_wtr, name + BPF_METADATA_PREFIX_LEN);
err = btf_dumper_type(&d, t_var->type, value + vsi->offset);
if (err) {
p_err("btf dump failed: %d", err);
break;
}
}
if (printed_header)
jsonw_end_object(json_wtr);
} else {
json_writer_t *btf_wtr;
struct btf_dumper d = {
.btf = btf,
.is_plain_text = true,
};
for (i = 0; i < vlen; i++, vsi++) {
t_var = btf__type_by_id(btf, vsi->type);
name = btf__name_by_offset(btf, t_var->name_off);
if (!has_metadata_prefix(name))
continue;
if (!printed_header) {
printf("\tmetadata:");
btf_wtr = jsonw_new(stdout);
if (!btf_wtr) {
p_err("jsonw alloc failed");
goto out_free;
}
				d.jw = btf_wtr;
printed_header = true;
}
printf("\n\t\t%s = ", name + BPF_METADATA_PREFIX_LEN);
jsonw_reset(btf_wtr);
err = btf_dumper_type(&d, t_var->type, value + vsi->offset);
if (err) {
p_err("btf dump failed: %d", err);
break;
}
}
if (printed_header)
jsonw_destroy(&btf_wtr);
}
out_free:
btf__free(btf);
free(value);
}
static void print_prog_header_json(struct bpf_prog_info *info, int fd)
{
const char *prog_type_str;
char prog_name[MAX_PROG_FULL_NAME];
jsonw_uint_field(json_wtr, "id", info->id);
prog_type_str = libbpf_bpf_prog_type_str(info->type);
if (prog_type_str)
jsonw_string_field(json_wtr, "type", prog_type_str);
else
jsonw_uint_field(json_wtr, "type", info->type);
if (*info->name) {
get_prog_full_name(info, fd, prog_name, sizeof(prog_name));
jsonw_string_field(json_wtr, "name", prog_name);
}
jsonw_name(json_wtr, "tag");
jsonw_printf(json_wtr, "\"" BPF_TAG_FMT "\"",
info->tag[0], info->tag[1], info->tag[2], info->tag[3],
info->tag[4], info->tag[5], info->tag[6], info->tag[7]);
jsonw_bool_field(json_wtr, "gpl_compatible", info->gpl_compatible);
if (info->run_time_ns) {
jsonw_uint_field(json_wtr, "run_time_ns", info->run_time_ns);
jsonw_uint_field(json_wtr, "run_cnt", info->run_cnt);
}
if (info->recursion_misses)
jsonw_uint_field(json_wtr, "recursion_misses", info->recursion_misses);
}
static void print_prog_json(struct bpf_prog_info *info, int fd)
{
char *memlock;
jsonw_start_object(json_wtr);
print_prog_header_json(info, fd);
print_dev_json(info->ifindex, info->netns_dev, info->netns_ino);
if (info->load_time) {
char buf[32];
print_boot_time(info->load_time, buf, sizeof(buf));
/* Piggy back on load_time, since 0 uid is a valid one */
jsonw_name(json_wtr, "loaded_at");
jsonw_printf(json_wtr, "%s", buf);
jsonw_uint_field(json_wtr, "uid", info->created_by_uid);
}
jsonw_uint_field(json_wtr, "bytes_xlated", info->xlated_prog_len);
if (info->jited_prog_len) {
jsonw_bool_field(json_wtr, "jited", true);
jsonw_uint_field(json_wtr, "bytes_jited", info->jited_prog_len);
} else {
jsonw_bool_field(json_wtr, "jited", false);
}
memlock = get_fdinfo(fd, "memlock");
if (memlock)
jsonw_int_field(json_wtr, "bytes_memlock", atoll(memlock));
free(memlock);
if (info->nr_map_ids)
show_prog_maps(fd, info->nr_map_ids);
if (info->btf_id)
jsonw_int_field(json_wtr, "btf_id", info->btf_id);
if (!hashmap__empty(prog_table)) {
struct hashmap_entry *entry;
jsonw_name(json_wtr, "pinned");
jsonw_start_array(json_wtr);
hashmap__for_each_key_entry(prog_table, entry, info->id)
jsonw_string(json_wtr, entry->pvalue);
jsonw_end_array(json_wtr);
}
emit_obj_refs_json(refs_table, info->id, json_wtr);
show_prog_metadata(fd, info->nr_map_ids);
jsonw_end_object(json_wtr);
}
static void print_prog_header_plain(struct bpf_prog_info *info, int fd)
{
const char *prog_type_str;
char prog_name[MAX_PROG_FULL_NAME];
printf("%u: ", info->id);
prog_type_str = libbpf_bpf_prog_type_str(info->type);
if (prog_type_str)
printf("%s ", prog_type_str);
else
printf("type %u ", info->type);
if (*info->name) {
get_prog_full_name(info, fd, prog_name, sizeof(prog_name));
printf("name %s ", prog_name);
}
printf("tag ");
fprint_hex(stdout, info->tag, BPF_TAG_SIZE, "");
print_dev_plain(info->ifindex, info->netns_dev, info->netns_ino);
printf("%s", info->gpl_compatible ? " gpl" : "");
if (info->run_time_ns)
printf(" run_time_ns %lld run_cnt %lld",
info->run_time_ns, info->run_cnt);
if (info->recursion_misses)
printf(" recursion_misses %lld", info->recursion_misses);
printf("\n");
}
static void print_prog_plain(struct bpf_prog_info *info, int fd)
{
char *memlock;
print_prog_header_plain(info, fd);
if (info->load_time) {
char buf[32];
print_boot_time(info->load_time, buf, sizeof(buf));
/* Piggy back on load_time, since 0 uid is a valid one */
printf("\tloaded_at %s uid %u\n", buf, info->created_by_uid);
}
printf("\txlated %uB", info->xlated_prog_len);
if (info->jited_prog_len)
printf(" jited %uB", info->jited_prog_len);
else
printf(" not jited");
memlock = get_fdinfo(fd, "memlock");
if (memlock)
printf(" memlock %sB", memlock);
free(memlock);
if (info->nr_map_ids)
show_prog_maps(fd, info->nr_map_ids);
if (!hashmap__empty(prog_table)) {
struct hashmap_entry *entry;
hashmap__for_each_key_entry(prog_table, entry, info->id)
printf("\n\tpinned %s", (char *)entry->pvalue);
}
if (info->btf_id)
printf("\n\tbtf_id %d", info->btf_id);
emit_obj_refs_plain(refs_table, info->id, "\n\tpids ");
printf("\n");
show_prog_metadata(fd, info->nr_map_ids);
}
static int show_prog(int fd)
{
struct bpf_prog_info info = {};
__u32 len = sizeof(info);
int err;
err = bpf_prog_get_info_by_fd(fd, &info, &len);
if (err) {
p_err("can't get prog info: %s", strerror(errno));
return -1;
}
if (json_output)
print_prog_json(&info, fd);
else
print_prog_plain(&info, fd);
return 0;
}
static int do_show_subset(int argc, char **argv)
{
int *fds = NULL;
int nb_fds, i;
int err = -1;
fds = malloc(sizeof(int));
if (!fds) {
p_err("mem alloc failed");
return -1;
}
nb_fds = prog_parse_fds(&argc, &argv, &fds);
if (nb_fds < 1)
goto exit_free;
if (json_output && nb_fds > 1)
jsonw_start_array(json_wtr); /* root array */
for (i = 0; i < nb_fds; i++) {
err = show_prog(fds[i]);
if (err) {
for (; i < nb_fds; i++)
close(fds[i]);
break;
}
close(fds[i]);
}
if (json_output && nb_fds > 1)
jsonw_end_array(json_wtr); /* root array */
exit_free:
free(fds);
return err;
}
static int do_show(int argc, char **argv)
{
__u32 id = 0;
int err;
int fd;
if (show_pinned) {
prog_table = hashmap__new(hash_fn_for_key_as_id,
equal_fn_for_key_as_id, NULL);
if (IS_ERR(prog_table)) {
p_err("failed to create hashmap for pinned paths");
return -1;
}
build_pinned_obj_table(prog_table, BPF_OBJ_PROG);
}
build_obj_refs_table(&refs_table, BPF_OBJ_PROG);
if (argc == 2)
return do_show_subset(argc, argv);
if (argc)
return BAD_ARG();
if (json_output)
jsonw_start_array(json_wtr);
while (true) {
err = bpf_prog_get_next_id(id, &id);
if (err) {
if (errno == ENOENT) {
err = 0;
break;
}
p_err("can't get next program: %s%s", strerror(errno),
errno == EINVAL ? " -- kernel too old?" : "");
err = -1;
break;
}
fd = bpf_prog_get_fd_by_id(id);
if (fd < 0) {
if (errno == ENOENT)
continue;
p_err("can't get prog by id (%u): %s",
id, strerror(errno));
err = -1;
break;
}
err = show_prog(fd);
close(fd);
if (err)
break;
}
if (json_output)
jsonw_end_array(json_wtr);
delete_obj_refs_table(refs_table);
if (show_pinned)
delete_pinned_obj_table(prog_table);
return err;
}
static int
prog_dump(struct bpf_prog_info *info, enum dump_mode mode,
char *filepath, bool opcodes, bool visual, bool linum)
{
struct bpf_prog_linfo *prog_linfo = NULL;
const char *disasm_opt = NULL;
struct dump_data dd = {};
void *func_info = NULL;
struct btf *btf = NULL;
char func_sig[1024];
unsigned char *buf;
__u32 member_len;
int fd, err = -1;
ssize_t n;
if (mode == DUMP_JITED) {
if (info->jited_prog_len == 0 || !info->jited_prog_insns) {
p_info("no instructions returned");
return -1;
}
buf = u64_to_ptr(info->jited_prog_insns);
member_len = info->jited_prog_len;
} else { /* DUMP_XLATED */
if (info->xlated_prog_len == 0 || !info->xlated_prog_insns) {
p_err("error retrieving insn dump: kernel.kptr_restrict set?");
return -1;
}
buf = u64_to_ptr(info->xlated_prog_insns);
member_len = info->xlated_prog_len;
}
if (info->btf_id) {
btf = btf__load_from_kernel_by_id(info->btf_id);
if (!btf) {
p_err("failed to get btf");
return -1;
}
}
func_info = u64_to_ptr(info->func_info);
if (info->nr_line_info) {
prog_linfo = bpf_prog_linfo__new(info);
if (!prog_linfo)
p_info("error in processing bpf_line_info. continue without it.");
}
if (filepath) {
fd = open(filepath, O_WRONLY | O_CREAT | O_TRUNC, 0600);
if (fd < 0) {
p_err("can't open file %s: %s", filepath,
strerror(errno));
goto exit_free;
}
n = write(fd, buf, member_len);
close(fd);
if (n != (ssize_t)member_len) {
p_err("error writing output file: %s",
n < 0 ? strerror(errno) : "short write");
goto exit_free;
}
if (json_output)
jsonw_null(json_wtr);
} else if (mode == DUMP_JITED) {
const char *name = NULL;
if (info->ifindex) {
name = ifindex_to_arch(info->ifindex, info->netns_dev,
info->netns_ino, &disasm_opt);
if (!name)
goto exit_free;
}
if (info->nr_jited_func_lens && info->jited_func_lens) {
struct kernel_sym *sym = NULL;
struct bpf_func_info *record;
char sym_name[SYM_MAX_NAME];
unsigned char *img = buf;
__u64 *ksyms = NULL;
__u32 *lens;
__u32 i;
if (info->nr_jited_ksyms) {
kernel_syms_load(&dd);
ksyms = u64_to_ptr(info->jited_ksyms);
}
if (json_output)
jsonw_start_array(json_wtr);
lens = u64_to_ptr(info->jited_func_lens);
for (i = 0; i < info->nr_jited_func_lens; i++) {
if (ksyms) {
sym = kernel_syms_search(&dd, ksyms[i]);
if (sym)
sprintf(sym_name, "%s", sym->name);
else
sprintf(sym_name, "0x%016llx", ksyms[i]);
} else {
strcpy(sym_name, "unknown");
}
if (func_info) {
record = func_info + i * info->func_info_rec_size;
btf_dumper_type_only(btf, record->type_id,
func_sig,
sizeof(func_sig));
}
if (json_output) {
jsonw_start_object(json_wtr);
if (func_info && func_sig[0] != '\0') {
jsonw_name(json_wtr, "proto");
jsonw_string(json_wtr, func_sig);
}
jsonw_name(json_wtr, "name");
jsonw_string(json_wtr, sym_name);
jsonw_name(json_wtr, "insns");
} else {
if (func_info && func_sig[0] != '\0')
printf("%s:\n", func_sig);
printf("%s:\n", sym_name);
}
if (disasm_print_insn(img, lens[i], opcodes,
name, disasm_opt, btf,
prog_linfo, ksyms[i], i,
linum))
goto exit_free;
img += lens[i];
if (json_output)
jsonw_end_object(json_wtr);
else
printf("\n");
}
if (json_output)
jsonw_end_array(json_wtr);
} else {
if (disasm_print_insn(buf, member_len, opcodes, name,
disasm_opt, btf, NULL, 0, 0,
false))
goto exit_free;
}
} else {
kernel_syms_load(&dd);
dd.nr_jited_ksyms = info->nr_jited_ksyms;
dd.jited_ksyms = u64_to_ptr(info->jited_ksyms);
dd.btf = btf;
dd.func_info = func_info;
dd.finfo_rec_size = info->func_info_rec_size;
dd.prog_linfo = prog_linfo;
if (json_output)
dump_xlated_json(&dd, buf, member_len, opcodes, linum);
else if (visual)
dump_xlated_cfg(&dd, buf, member_len, opcodes, linum);
else
dump_xlated_plain(&dd, buf, member_len, opcodes, linum);
kernel_syms_destroy(&dd);
}
err = 0;
exit_free:
btf__free(btf);
bpf_prog_linfo__free(prog_linfo);
return err;
}
static int do_dump(int argc, char **argv)
{
struct bpf_prog_info info;
__u32 info_len = sizeof(info);
size_t info_data_sz = 0;
void *info_data = NULL;
char *filepath = NULL;
bool opcodes = false;
bool visual = false;
enum dump_mode mode;
bool linum = false;
int nb_fds, i = 0;
int *fds = NULL;
int err = -1;
if (is_prefix(*argv, "jited")) {
if (disasm_init())
return -1;
mode = DUMP_JITED;
} else if (is_prefix(*argv, "xlated")) {
mode = DUMP_XLATED;
} else {
p_err("expected 'xlated' or 'jited', got: %s", *argv);
return -1;
}
NEXT_ARG();
if (argc < 2)
usage();
fds = malloc(sizeof(int));
if (!fds) {
p_err("mem alloc failed");
return -1;
}
nb_fds = prog_parse_fds(&argc, &argv, &fds);
if (nb_fds < 1)
goto exit_free;
while (argc) {
if (is_prefix(*argv, "file")) {
NEXT_ARG();
if (!argc) {
p_err("expected file path");
goto exit_close;
}
if (nb_fds > 1) {
p_err("several programs matched");
goto exit_close;
}
filepath = *argv;
NEXT_ARG();
} else if (is_prefix(*argv, "opcodes")) {
opcodes = true;
NEXT_ARG();
} else if (is_prefix(*argv, "visual")) {
if (nb_fds > 1) {
p_err("several programs matched");
goto exit_close;
}
visual = true;
NEXT_ARG();
} else if (is_prefix(*argv, "linum")) {
linum = true;
NEXT_ARG();
} else {
usage();
goto exit_close;
}
}
if (filepath && (opcodes || visual || linum)) {
p_err("'file' is not compatible with 'opcodes', 'visual', or 'linum'");
goto exit_close;
}
if (json_output && visual) {
p_err("'visual' is not compatible with JSON output");
goto exit_close;
}
if (json_output && nb_fds > 1)
jsonw_start_array(json_wtr); /* root array */
for (i = 0; i < nb_fds; i++) {
memset(&info, 0, sizeof(info));
err = bpf_prog_get_info_by_fd(fds[i], &info, &info_len);
if (err) {
p_err("can't get prog info: %s", strerror(errno));
break;
}
err = prep_prog_info(&info, mode, &info_data, &info_data_sz);
if (err) {
p_err("can't grow prog info_data");
break;
}
err = bpf_prog_get_info_by_fd(fds[i], &info, &info_len);
if (err) {
p_err("can't get prog info: %s", strerror(errno));
break;
}
if (json_output && nb_fds > 1) {
jsonw_start_object(json_wtr); /* prog object */
print_prog_header_json(&info, fds[i]);
jsonw_name(json_wtr, "insns");
} else if (nb_fds > 1) {
print_prog_header_plain(&info, fds[i]);
}
err = prog_dump(&info, mode, filepath, opcodes, visual, linum);
if (json_output && nb_fds > 1)
jsonw_end_object(json_wtr); /* prog object */
else if (i != nb_fds - 1 && nb_fds > 1)
printf("\n");
if (err)
break;
close(fds[i]);
}
if (json_output && nb_fds > 1)
jsonw_end_array(json_wtr); /* root array */
exit_close:
for (; i < nb_fds; i++)
close(fds[i]);
exit_free:
free(info_data);
free(fds);
return err;
}
static int do_pin(int argc, char **argv)
{
int err;
err = do_pin_any(argc, argv, prog_parse_fd);
if (!err && json_output)
jsonw_null(json_wtr);
return err;
}
struct map_replace {
int idx;
int fd;
char *name;
};
static int map_replace_compar(const void *p1, const void *p2)
{
const struct map_replace *a = p1, *b = p2;
return a->idx - b->idx;
}
static int parse_attach_detach_args(int argc, char **argv, int *progfd,
enum bpf_attach_type *attach_type,
int *mapfd)
{
if (!REQ_ARGS(3))
return -EINVAL;
*progfd = prog_parse_fd(&argc, &argv);
if (*progfd < 0)
return *progfd;
*attach_type = parse_attach_type(*argv);
if (*attach_type == __MAX_BPF_ATTACH_TYPE) {
p_err("invalid attach/detach type");
return -EINVAL;
}
if (*attach_type == BPF_FLOW_DISSECTOR) {
*mapfd = 0;
return 0;
}
NEXT_ARG();
if (!REQ_ARGS(2))
return -EINVAL;
*mapfd = map_parse_fd(&argc, &argv);
if (*mapfd < 0)
return *mapfd;
return 0;
}
static int do_attach(int argc, char **argv)
{
enum bpf_attach_type attach_type;
int err, progfd;
int mapfd;
err = parse_attach_detach_args(argc, argv,
&progfd, &attach_type, &mapfd);
if (err)
return err;
err = bpf_prog_attach(progfd, mapfd, attach_type, 0);
if (err) {
p_err("failed prog attach to map");
return -EINVAL;
}
if (json_output)
jsonw_null(json_wtr);
return 0;
}
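/*
 * Example (IDs are hypothetical): "bpftool prog attach id 5
 * sk_msg_verdict id 3" attaches program 5 to sockmap 3. For
 * flow_dissector the map argument is omitted, as parsed above.
 */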
static int do_detach(int argc, char **argv)
{
enum bpf_attach_type attach_type;
int err, progfd;
int mapfd;
err = parse_attach_detach_args(argc, argv,
&progfd, &attach_type, &mapfd);
if (err)
return err;
err = bpf_prog_detach2(progfd, mapfd, attach_type);
if (err) {
p_err("failed prog detach from map");
return -EINVAL;
}
if (json_output)
jsonw_null(json_wtr);
return 0;
}
static int check_single_stdin(char *file_data_in, char *file_ctx_in)
{
if (file_data_in && file_ctx_in &&
!strcmp(file_data_in, "-") && !strcmp(file_ctx_in, "-")) {
p_err("cannot use standard input for both data_in and ctx_in");
return -1;
}
return 0;
}
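/*
 * Read a whole file (or stdin for "-") into a heap buffer that starts
 * at 256 bytes and is doubled on demand; the test-run ABI carries
 * sizes as __u32, hence the UINT32_MAX guard in the loop below.
 */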
static int get_run_data(const char *fname, void **data_ptr, unsigned int *size)
{
size_t block_size = 256;
size_t buf_size = block_size;
size_t nb_read = 0;
void *tmp;
FILE *f;
if (!fname) {
*data_ptr = NULL;
*size = 0;
return 0;
}
if (!strcmp(fname, "-"))
f = stdin;
else
f = fopen(fname, "r");
if (!f) {
p_err("failed to open %s: %s", fname, strerror(errno));
return -1;
}
*data_ptr = malloc(block_size);
if (!*data_ptr) {
p_err("failed to allocate memory for data_in/ctx_in: %s",
strerror(errno));
goto err_fclose;
}
while ((nb_read += fread(*data_ptr + nb_read, 1, block_size, f))) {
if (feof(f))
break;
if (ferror(f)) {
p_err("failed to read data_in/ctx_in from %s: %s",
fname, strerror(errno));
goto err_free;
}
if (nb_read > buf_size - block_size) {
if (buf_size == UINT32_MAX) {
p_err("data_in/ctx_in is too long (max: %d)",
UINT32_MAX);
goto err_free;
}
/* No space for fread()-ing next chunk; realloc() */
buf_size *= 2;
tmp = realloc(*data_ptr, buf_size);
if (!tmp) {
p_err("failed to reallocate data_in/ctx_in: %s",
strerror(errno));
goto err_free;
}
*data_ptr = tmp;
}
}
if (f != stdin)
fclose(f);
*size = nb_read;
return 0;
err_free:
free(*data_ptr);
*data_ptr = NULL;
err_fclose:
if (f != stdin)
fclose(f);
return -1;
}
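/*
 * xxd-style dump; e.g. for made-up bytes the output looks like:
 *
 *   0000000	7f45 4c46 0201 0100 0000 0000 0000 0000 | .ELF.... ........
 */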
static void hex_print(void *data, unsigned int size, FILE *f)
{
size_t i, j;
char c;
for (i = 0; i < size; i += 16) {
/* Row offset */
fprintf(f, "%07zx\t", i);
/* Hexadecimal values */
for (j = i; j < i + 16 && j < size; j++)
fprintf(f, "%02x%s", *(uint8_t *)(data + j),
j % 2 ? " " : "");
for (; j < i + 16; j++)
fprintf(f, " %s", j % 2 ? " " : "");
/* ASCII values (if relevant), '.' otherwise */
fprintf(f, "| ");
for (j = i; j < i + 16 && j < size; j++) {
c = *(char *)(data + j);
if (c < ' ' || c > '~')
c = '.';
fprintf(f, "%c%s", c, j == i + 7 ? " " : "");
}
fprintf(f, "\n");
}
}
static int
print_run_output(void *data, unsigned int size, const char *fname,
const char *json_key)
{
size_t nb_written;
FILE *f;
if (!fname)
return 0;
if (!strcmp(fname, "-")) {
f = stdout;
if (json_output) {
jsonw_name(json_wtr, json_key);
print_data_json(data, size);
} else {
hex_print(data, size, f);
}
return 0;
}
f = fopen(fname, "w");
if (!f) {
p_err("failed to open %s: %s", fname, strerror(errno));
return -1;
}
nb_written = fwrite(data, 1, size, f);
fclose(f);
if (nb_written != size) {
p_err("failed to write output data/ctx: %s", strerror(errno));
return -1;
}
return 0;
}
static int alloc_run_data(void **data_ptr, unsigned int size_out)
{
*data_ptr = calloc(size_out, 1);
if (!*data_ptr) {
p_err("failed to allocate memory for output data/ctx: %s",
strerror(errno));
return -1;
}
return 0;
}
static int do_run(int argc, char **argv)
{
char *data_fname_in = NULL, *data_fname_out = NULL;
char *ctx_fname_in = NULL, *ctx_fname_out = NULL;
const unsigned int default_size = SZ_32K;
void *data_in = NULL, *data_out = NULL;
void *ctx_in = NULL, *ctx_out = NULL;
unsigned int repeat = 1;
int fd, err;
LIBBPF_OPTS(bpf_test_run_opts, test_attr);
if (!REQ_ARGS(4))
return -1;
fd = prog_parse_fd(&argc, &argv);
if (fd < 0)
return -1;
while (argc) {
if (detect_common_prefix(*argv, "data_in", "data_out",
"data_size_out", NULL))
return -1;
if (detect_common_prefix(*argv, "ctx_in", "ctx_out",
"ctx_size_out", NULL))
return -1;
if (is_prefix(*argv, "data_in")) {
NEXT_ARG();
if (!REQ_ARGS(1))
return -1;
data_fname_in = GET_ARG();
if (check_single_stdin(data_fname_in, ctx_fname_in))
return -1;
} else if (is_prefix(*argv, "data_out")) {
NEXT_ARG();
if (!REQ_ARGS(1))
return -1;
data_fname_out = GET_ARG();
} else if (is_prefix(*argv, "data_size_out")) {
char *endptr;
NEXT_ARG();
if (!REQ_ARGS(1))
return -1;
test_attr.data_size_out = strtoul(*argv, &endptr, 0);
if (*endptr) {
p_err("can't parse %s as output data size",
*argv);
return -1;
}
NEXT_ARG();
} else if (is_prefix(*argv, "ctx_in")) {
NEXT_ARG();
if (!REQ_ARGS(1))
return -1;
ctx_fname_in = GET_ARG();
if (check_single_stdin(data_fname_in, ctx_fname_in))
return -1;
} else if (is_prefix(*argv, "ctx_out")) {
NEXT_ARG();
if (!REQ_ARGS(1))
return -1;
ctx_fname_out = GET_ARG();
} else if (is_prefix(*argv, "ctx_size_out")) {
char *endptr;
NEXT_ARG();
if (!REQ_ARGS(1))
return -1;
test_attr.ctx_size_out = strtoul(*argv, &endptr, 0);
if (*endptr) {
p_err("can't parse %s as output context size",
*argv);
return -1;
}
NEXT_ARG();
} else if (is_prefix(*argv, "repeat")) {
char *endptr;
NEXT_ARG();
if (!REQ_ARGS(1))
return -1;
repeat = strtoul(*argv, &endptr, 0);
if (*endptr) {
p_err("can't parse %s as repeat number",
*argv);
return -1;
}
NEXT_ARG();
} else {
p_err("expected no more arguments, 'data_in', 'data_out', 'data_size_out', 'ctx_in', 'ctx_out', 'ctx_size_out' or 'repeat', got: '%s'?",
*argv);
return -1;
}
}
err = get_run_data(data_fname_in, &data_in, &test_attr.data_size_in);
if (err)
return -1;
if (data_in) {
if (!test_attr.data_size_out)
test_attr.data_size_out = default_size;
err = alloc_run_data(&data_out, test_attr.data_size_out);
if (err)
goto free_data_in;
}
err = get_run_data(ctx_fname_in, &ctx_in, &test_attr.ctx_size_in);
if (err)
goto free_data_out;
if (ctx_in) {
if (!test_attr.ctx_size_out)
test_attr.ctx_size_out = default_size;
err = alloc_run_data(&ctx_out, test_attr.ctx_size_out);
if (err)
goto free_ctx_in;
}
test_attr.repeat = repeat;
test_attr.data_in = data_in;
test_attr.data_out = data_out;
test_attr.ctx_in = ctx_in;
test_attr.ctx_out = ctx_out;
err = bpf_prog_test_run_opts(fd, &test_attr);
if (err) {
p_err("failed to run program: %s", strerror(errno));
goto free_ctx_out;
}
err = 0;
if (json_output)
jsonw_start_object(json_wtr); /* root */
/* Do not exit on errors occurring when printing output data/context,
* we still want to print return value and duration for program run.
*/
if (test_attr.data_size_out)
err += print_run_output(test_attr.data_out,
test_attr.data_size_out,
data_fname_out, "data_out");
if (test_attr.ctx_size_out)
err += print_run_output(test_attr.ctx_out,
test_attr.ctx_size_out,
ctx_fname_out, "ctx_out");
if (json_output) {
jsonw_uint_field(json_wtr, "retval", test_attr.retval);
jsonw_uint_field(json_wtr, "duration", test_attr.duration);
jsonw_end_object(json_wtr); /* root */
} else {
fprintf(stdout, "Return value: %u, duration%s: %uns\n",
test_attr.retval,
repeat > 1 ? " (average)" : "", test_attr.duration);
}
free_ctx_out:
free(ctx_out);
free_ctx_in:
free(ctx_in);
free_data_out:
free(data_out);
free_data_in:
free(data_in);
return err;
}
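/*
 * Example invocation (paths are hypothetical):
 *
 *   bpftool prog run pinned /sys/fs/bpf/skb_prog \
 *           data_in packet.bin data_out - repeat 1000
 *
 * runs the pinned program on the contents of packet.bin, hex-dumps the
 * resulting data to stdout and prints the average duration over the
 * 1000 runs.
 */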
static int
get_prog_type_by_name(const char *name, enum bpf_prog_type *prog_type,
enum bpf_attach_type *expected_attach_type)
{
libbpf_print_fn_t print_backup;
int ret;
ret = libbpf_prog_type_by_name(name, prog_type, expected_attach_type);
if (!ret)
return ret;
/* libbpf_prog_type_by_name() failed, let's re-run with debug level */
print_backup = libbpf_set_print(print_all_levels);
ret = libbpf_prog_type_by_name(name, prog_type, expected_attach_type);
libbpf_set_print(print_backup);
return ret;
}
static int
auto_attach_program(struct bpf_program *prog, const char *path)
{
struct bpf_link *link;
int err;
link = bpf_program__attach(prog);
if (!link) {
p_info("Program %s does not support autoattach, falling back to pinning",
bpf_program__name(prog));
return bpf_obj_pin(bpf_program__fd(prog), path);
}
err = bpf_link__pin(link, path);
bpf_link__destroy(link);
return err;
}
static int
auto_attach_programs(struct bpf_object *obj, const char *path)
{
struct bpf_program *prog;
char buf[PATH_MAX];
int err;
bpf_object__for_each_program(prog, obj) {
err = pathname_concat(buf, sizeof(buf), path, bpf_program__name(prog));
if (err)
goto err_unpin_programs;
err = auto_attach_program(prog, buf);
if (err)
goto err_unpin_programs;
}
return 0;
err_unpin_programs:
while ((prog = bpf_object__prev_program(obj, prog))) {
if (pathname_concat(buf, sizeof(buf), path, bpf_program__name(prog)))
continue;
bpf_program__unpin(prog, buf);
}
return err;
}
static int load_with_options(int argc, char **argv, bool first_prog_only)
{
enum bpf_prog_type common_prog_type = BPF_PROG_TYPE_UNSPEC;
DECLARE_LIBBPF_OPTS(bpf_object_open_opts, open_opts,
.relaxed_maps = relaxed_maps,
);
enum bpf_attach_type expected_attach_type;
struct map_replace *map_replace = NULL;
struct bpf_program *prog = NULL, *pos;
unsigned int old_map_fds = 0;
const char *pinmaps = NULL;
__u32 xdpmeta_ifindex = 0;
__u32 offload_ifindex = 0;
bool auto_attach = false;
struct bpf_object *obj;
struct bpf_map *map;
const char *pinfile;
unsigned int i, j;
const char *file;
int idx, err;
if (!REQ_ARGS(2))
return -1;
file = GET_ARG();
pinfile = GET_ARG();
while (argc) {
if (is_prefix(*argv, "type")) {
NEXT_ARG();
if (common_prog_type != BPF_PROG_TYPE_UNSPEC) {
p_err("program type already specified");
goto err_free_reuse_maps;
}
if (!REQ_ARGS(1))
goto err_free_reuse_maps;
err = libbpf_prog_type_by_name(*argv, &common_prog_type,
&expected_attach_type);
if (err < 0) {
/* Put a '/' at the end of type to appease libbpf */
char *type = malloc(strlen(*argv) + 2);
if (!type) {
p_err("mem alloc failed");
goto err_free_reuse_maps;
}
*type = 0;
strcat(type, *argv);
strcat(type, "/");
err = get_prog_type_by_name(type, &common_prog_type,
&expected_attach_type);
free(type);
if (err < 0)
goto err_free_reuse_maps;
}
NEXT_ARG();
} else if (is_prefix(*argv, "map")) {
void *new_map_replace;
char *endptr, *name;
int fd;
NEXT_ARG();
if (!REQ_ARGS(4))
goto err_free_reuse_maps;
if (is_prefix(*argv, "idx")) {
NEXT_ARG();
idx = strtoul(*argv, &endptr, 0);
if (*endptr) {
p_err("can't parse %s as IDX", *argv);
goto err_free_reuse_maps;
}
name = NULL;
} else if (is_prefix(*argv, "name")) {
NEXT_ARG();
name = *argv;
idx = -1;
} else {
p_err("expected 'idx' or 'name', got: '%s'?",
*argv);
goto err_free_reuse_maps;
}
NEXT_ARG();
fd = map_parse_fd(&argc, &argv);
if (fd < 0)
goto err_free_reuse_maps;
new_map_replace = libbpf_reallocarray(map_replace,
old_map_fds + 1,
sizeof(*map_replace));
if (!new_map_replace) {
p_err("mem alloc failed");
goto err_free_reuse_maps;
}
map_replace = new_map_replace;
map_replace[old_map_fds].idx = idx;
map_replace[old_map_fds].name = name;
map_replace[old_map_fds].fd = fd;
old_map_fds++;
} else if (is_prefix(*argv, "dev")) {
p_info("Warning: 'bpftool prog load [...] dev <ifname>' syntax is deprecated.\n"
"Going further, please use 'offload_dev <ifname>' to offload program to device.\n"
"For applications using XDP hints only, use 'xdpmeta_dev <ifname>'.");
goto offload_dev;
} else if (is_prefix(*argv, "offload_dev")) {
offload_dev:
NEXT_ARG();
if (offload_ifindex) {
p_err("offload_dev already specified");
goto err_free_reuse_maps;
} else if (xdpmeta_ifindex) {
p_err("xdpmeta_dev and offload_dev are mutually exclusive");
goto err_free_reuse_maps;
}
if (!REQ_ARGS(1))
goto err_free_reuse_maps;
offload_ifindex = if_nametoindex(*argv);
if (!offload_ifindex) {
p_err("unrecognized netdevice '%s': %s",
*argv, strerror(errno));
goto err_free_reuse_maps;
}
NEXT_ARG();
} else if (is_prefix(*argv, "xdpmeta_dev")) {
NEXT_ARG();
if (xdpmeta_ifindex) {
p_err("xdpmeta_dev already specified");
goto err_free_reuse_maps;
} else if (offload_ifindex) {
p_err("xdpmeta_dev and offload_dev are mutually exclusive");
goto err_free_reuse_maps;
}
if (!REQ_ARGS(1))
goto err_free_reuse_maps;
xdpmeta_ifindex = if_nametoindex(*argv);
if (!xdpmeta_ifindex) {
p_err("unrecognized netdevice '%s': %s",
*argv, strerror(errno));
goto err_free_reuse_maps;
}
NEXT_ARG();
} else if (is_prefix(*argv, "pinmaps")) {
NEXT_ARG();
if (!REQ_ARGS(1))
goto err_free_reuse_maps;
pinmaps = GET_ARG();
} else if (is_prefix(*argv, "autoattach")) {
auto_attach = true;
NEXT_ARG();
} else {
p_err("expected no more arguments, 'type', 'map' or 'dev', got: '%s'?",
*argv);
goto err_free_reuse_maps;
}
}
set_max_rlimit();
if (verifier_logs)
/* log_level1 + log_level2 + stats, but not stable UAPI */
open_opts.kernel_log_level = 1 + 2 + 4;
obj = bpf_object__open_file(file, &open_opts);
if (!obj) {
p_err("failed to open object file");
goto err_free_reuse_maps;
}
bpf_object__for_each_program(pos, obj) {
enum bpf_prog_type prog_type = common_prog_type;
if (prog_type == BPF_PROG_TYPE_UNSPEC) {
const char *sec_name = bpf_program__section_name(pos);
err = get_prog_type_by_name(sec_name, &prog_type,
&expected_attach_type);
if (err < 0)
goto err_close_obj;
}
if (prog_type == BPF_PROG_TYPE_XDP && xdpmeta_ifindex) {
bpf_program__set_flags(pos, BPF_F_XDP_DEV_BOUND_ONLY);
bpf_program__set_ifindex(pos, xdpmeta_ifindex);
} else {
bpf_program__set_ifindex(pos, offload_ifindex);
}
if (bpf_program__type(pos) != prog_type)
bpf_program__set_type(pos, prog_type);
bpf_program__set_expected_attach_type(pos, expected_attach_type);
}
qsort(map_replace, old_map_fds, sizeof(*map_replace),
map_replace_compar);
	/* After the sort, maps replaced by name come first in the list
	 * because they have idx == -1. Resolve their names to indices.
	 */
j = 0;
while (j < old_map_fds && map_replace[j].name) {
i = 0;
bpf_object__for_each_map(map, obj) {
if (!strcmp(bpf_map__name(map), map_replace[j].name)) {
map_replace[j].idx = i;
break;
}
i++;
}
if (map_replace[j].idx == -1) {
p_err("unable to find map '%s'", map_replace[j].name);
goto err_close_obj;
}
j++;
}
/* Resort if any names were resolved */
if (j)
qsort(map_replace, old_map_fds, sizeof(*map_replace),
map_replace_compar);
/* Set ifindex and name reuse */
j = 0;
idx = 0;
bpf_object__for_each_map(map, obj) {
if (bpf_map__type(map) != BPF_MAP_TYPE_PERF_EVENT_ARRAY)
bpf_map__set_ifindex(map, offload_ifindex);
if (j < old_map_fds && idx == map_replace[j].idx) {
err = bpf_map__reuse_fd(map, map_replace[j++].fd);
if (err) {
p_err("unable to set up map reuse: %d", err);
goto err_close_obj;
}
/* Next reuse wants to apply to the same map */
if (j < old_map_fds && map_replace[j].idx == idx) {
p_err("replacement for map idx %d specified more than once",
idx);
goto err_close_obj;
}
}
idx++;
}
if (j < old_map_fds) {
p_err("map idx '%d' not used", map_replace[j].idx);
goto err_close_obj;
}
err = bpf_object__load(obj);
if (err) {
p_err("failed to load object file");
goto err_close_obj;
}
err = mount_bpffs_for_pin(pinfile, !first_prog_only);
if (err)
goto err_close_obj;
if (first_prog_only) {
prog = bpf_object__next_program(obj, NULL);
if (!prog) {
p_err("object file doesn't contain any bpf program");
goto err_close_obj;
}
if (auto_attach)
err = auto_attach_program(prog, pinfile);
else
err = bpf_obj_pin(bpf_program__fd(prog), pinfile);
if (err) {
p_err("failed to pin program %s",
bpf_program__section_name(prog));
goto err_close_obj;
}
} else {
if (auto_attach)
err = auto_attach_programs(obj, pinfile);
else
err = bpf_object__pin_programs(obj, pinfile);
if (err) {
p_err("failed to pin all programs");
goto err_close_obj;
}
}
if (pinmaps) {
err = bpf_object__pin_maps(obj, pinmaps);
if (err) {
p_err("failed to pin all maps");
goto err_unpin;
}
}
if (json_output)
jsonw_null(json_wtr);
bpf_object__close(obj);
for (i = 0; i < old_map_fds; i++)
close(map_replace[i].fd);
free(map_replace);
return 0;
err_unpin:
if (first_prog_only)
unlink(pinfile);
else
bpf_object__unpin_programs(obj, pinfile);
err_close_obj:
bpf_object__close(obj);
err_free_reuse_maps:
for (i = 0; i < old_map_fds; i++)
close(map_replace[i].fd);
free(map_replace);
return -1;
}
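/*
 * Example invocations (object and pin paths are hypothetical):
 *
 *   bpftool prog load sample.bpf.o /sys/fs/bpf/sample type xdp
 *   bpftool prog loadall sample.bpf.o /sys/fs/bpf/samples \
 *           map name stats id 7 pinmaps /sys/fs/bpf/samples/maps
 *
 * The second form pins every program of the object under the target
 * directory, reuses the existing map with id 7 for the map named
 * "stats", and pins the object's maps under the pinmaps directory.
 */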
static int count_open_fds(void)
{
DIR *dp = opendir("/proc/self/fd");
struct dirent *de;
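	/* start at -3 to skip ".", ".." and the fd held by opendir() itself */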
int cnt = -3;
if (!dp)
return -1;
while ((de = readdir(dp)))
cnt++;
closedir(dp);
return cnt;
}
static int try_loader(struct gen_loader_opts *gen)
{
struct bpf_load_and_run_opts opts = {};
struct bpf_loader_ctx *ctx;
int ctx_sz = sizeof(*ctx) + 64 * max(sizeof(struct bpf_map_desc),
sizeof(struct bpf_prog_desc));
int log_buf_sz = (1u << 24) - 1;
int err, fds_before, fd_delta;
char *log_buf = NULL;
ctx = alloca(ctx_sz);
memset(ctx, 0, ctx_sz);
ctx->sz = ctx_sz;
if (verifier_logs) {
ctx->log_level = 1 + 2 + 4;
ctx->log_size = log_buf_sz;
log_buf = malloc(log_buf_sz);
if (!log_buf)
return -ENOMEM;
ctx->log_buf = (long) log_buf;
}
opts.ctx = ctx;
opts.data = gen->data;
opts.data_sz = gen->data_sz;
opts.insns = gen->insns;
opts.insns_sz = gen->insns_sz;
fds_before = count_open_fds();
err = bpf_load_and_run(&opts);
fd_delta = count_open_fds() - fds_before;
if (err < 0 || verifier_logs) {
fprintf(stderr, "err %d\n%s\n%s", err, opts.errstr, log_buf);
if (fd_delta && err < 0)
fprintf(stderr, "loader prog leaked %d FDs\n",
fd_delta);
}
free(log_buf);
return err;
}
static int do_loader(int argc, char **argv)
{
DECLARE_LIBBPF_OPTS(bpf_object_open_opts, open_opts);
DECLARE_LIBBPF_OPTS(gen_loader_opts, gen);
struct bpf_object *obj;
const char *file;
int err = 0;
if (!REQ_ARGS(1))
return -1;
file = GET_ARG();
if (verifier_logs)
/* log_level1 + log_level2 + stats, but not stable UAPI */
open_opts.kernel_log_level = 1 + 2 + 4;
obj = bpf_object__open_file(file, &open_opts);
if (!obj) {
p_err("failed to open object file");
goto err_close_obj;
}
err = bpf_object__gen_loader(obj, &gen);
if (err)
goto err_close_obj;
err = bpf_object__load(obj);
if (err) {
p_err("failed to load object file");
goto err_close_obj;
}
if (verifier_logs) {
struct dump_data dd = {};
kernel_syms_load(&dd);
dump_xlated_plain(&dd, (void *)gen.insns, gen.insns_sz, false, false);
kernel_syms_destroy(&dd);
}
err = try_loader(&gen);
err_close_obj:
bpf_object__close(obj);
return err;
}
static int do_load(int argc, char **argv)
{
if (use_loader)
return do_loader(argc, argv);
return load_with_options(argc, argv, true);
}
static int do_loadall(int argc, char **argv)
{
return load_with_options(argc, argv, false);
}
#ifdef BPFTOOL_WITHOUT_SKELETONS
static int do_profile(int argc, char **argv)
{
p_err("bpftool prog profile command is not supported. Please build bpftool with clang >= 10.0.0");
return 0;
}
#else /* BPFTOOL_WITHOUT_SKELETONS */
#include "profiler.skel.h"
struct profile_metric {
const char *name;
struct bpf_perf_event_value val;
struct perf_event_attr attr;
bool selected;
/* calculate ratios like instructions per cycle */
const int ratio_metric; /* 0 for N/A, 1 for index 0 (cycles) */
const char *ratio_desc;
const float ratio_mul;
} metrics[] = {
{
.name = "cycles",
.attr = {
.type = PERF_TYPE_HARDWARE,
.config = PERF_COUNT_HW_CPU_CYCLES,
.exclude_user = 1,
},
},
{
.name = "instructions",
.attr = {
.type = PERF_TYPE_HARDWARE,
.config = PERF_COUNT_HW_INSTRUCTIONS,
.exclude_user = 1,
},
.ratio_metric = 1,
.ratio_desc = "insns per cycle",
.ratio_mul = 1.0,
},
{
.name = "l1d_loads",
.attr = {
.type = PERF_TYPE_HW_CACHE,
.config =
PERF_COUNT_HW_CACHE_L1D |
(PERF_COUNT_HW_CACHE_OP_READ << 8) |
(PERF_COUNT_HW_CACHE_RESULT_ACCESS << 16),
.exclude_user = 1,
},
},
{
.name = "llc_misses",
.attr = {
.type = PERF_TYPE_HW_CACHE,
.config =
PERF_COUNT_HW_CACHE_LL |
(PERF_COUNT_HW_CACHE_OP_READ << 8) |
(PERF_COUNT_HW_CACHE_RESULT_MISS << 16),
.exclude_user = 1
},
.ratio_metric = 2,
.ratio_desc = "LLC misses per million insns",
.ratio_mul = 1e6,
},
{
.name = "itlb_misses",
.attr = {
.type = PERF_TYPE_HW_CACHE,
.config =
PERF_COUNT_HW_CACHE_ITLB |
(PERF_COUNT_HW_CACHE_OP_READ << 8) |
(PERF_COUNT_HW_CACHE_RESULT_MISS << 16),
.exclude_user = 1
},
.ratio_metric = 2,
.ratio_desc = "itlb misses per million insns",
.ratio_mul = 1e6,
},
{
.name = "dtlb_misses",
.attr = {
.type = PERF_TYPE_HW_CACHE,
.config =
PERF_COUNT_HW_CACHE_DTLB |
(PERF_COUNT_HW_CACHE_OP_READ << 8) |
(PERF_COUNT_HW_CACHE_RESULT_MISS << 16),
.exclude_user = 1
},
.ratio_metric = 2,
.ratio_desc = "dtlb misses per million insns",
.ratio_mul = 1e6,
},
};
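/*
 * ratio_metric is 1-based: a value of N means "divide this counter by
 * metrics[N - 1].val.counter when printing". So "instructions" uses 1
 * (cycles) to report insns per cycle, and the *_misses entries use 2
 * (instructions) with ratio_mul = 1e6 to report misses per million
 * instructions.
 */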
static __u64 profile_total_count;
#define MAX_NUM_PROFILE_METRICS 4
static int profile_parse_metrics(int argc, char **argv)
{
unsigned int metric_cnt;
int selected_cnt = 0;
unsigned int i;
metric_cnt = ARRAY_SIZE(metrics);
while (argc > 0) {
for (i = 0; i < metric_cnt; i++) {
if (is_prefix(argv[0], metrics[i].name)) {
if (!metrics[i].selected)
selected_cnt++;
metrics[i].selected = true;
break;
}
}
if (i == metric_cnt) {
p_err("unknown metric %s", argv[0]);
return -1;
}
NEXT_ARG();
}
if (selected_cnt > MAX_NUM_PROFILE_METRICS) {
p_err("too many (%d) metrics, please specify no more than %d metrics at at time",
selected_cnt, MAX_NUM_PROFILE_METRICS);
return -1;
}
return selected_cnt;
}
static void profile_read_values(struct profiler_bpf *obj)
{
__u32 m, cpu, num_cpu = obj->rodata->num_cpu;
int reading_map_fd, count_map_fd;
__u64 counts[num_cpu];
__u32 key = 0;
int err;
reading_map_fd = bpf_map__fd(obj->maps.accum_readings);
count_map_fd = bpf_map__fd(obj->maps.counts);
if (reading_map_fd < 0 || count_map_fd < 0) {
p_err("failed to get fd for map");
return;
}
err = bpf_map_lookup_elem(count_map_fd, &key, counts);
if (err) {
p_err("failed to read count_map: %s", strerror(errno));
return;
}
profile_total_count = 0;
for (cpu = 0; cpu < num_cpu; cpu++)
profile_total_count += counts[cpu];
for (m = 0; m < ARRAY_SIZE(metrics); m++) {
struct bpf_perf_event_value values[num_cpu];
if (!metrics[m].selected)
continue;
err = bpf_map_lookup_elem(reading_map_fd, &key, values);
if (err) {
p_err("failed to read reading_map: %s",
strerror(errno));
return;
}
for (cpu = 0; cpu < num_cpu; cpu++) {
metrics[m].val.counter += values[cpu].counter;
metrics[m].val.enabled += values[cpu].enabled;
metrics[m].val.running += values[cpu].running;
}
key++;
}
}
static void profile_print_readings_json(void)
{
__u32 m;
jsonw_start_array(json_wtr);
for (m = 0; m < ARRAY_SIZE(metrics); m++) {
if (!metrics[m].selected)
continue;
jsonw_start_object(json_wtr);
jsonw_string_field(json_wtr, "metric", metrics[m].name);
jsonw_lluint_field(json_wtr, "run_cnt", profile_total_count);
jsonw_lluint_field(json_wtr, "value", metrics[m].val.counter);
jsonw_lluint_field(json_wtr, "enabled", metrics[m].val.enabled);
jsonw_lluint_field(json_wtr, "running", metrics[m].val.running);
jsonw_end_object(json_wtr);
}
jsonw_end_array(json_wtr);
}
static void profile_print_readings_plain(void)
{
__u32 m;
printf("\n%18llu %-20s\n", profile_total_count, "run_cnt");
for (m = 0; m < ARRAY_SIZE(metrics); m++) {
struct bpf_perf_event_value *val = &metrics[m].val;
int r;
if (!metrics[m].selected)
continue;
printf("%18llu %-20s", val->counter, metrics[m].name);
r = metrics[m].ratio_metric - 1;
if (r >= 0 && metrics[r].selected &&
metrics[r].val.counter > 0) {
printf("# %8.2f %-30s",
val->counter * metrics[m].ratio_mul /
metrics[r].val.counter,
metrics[m].ratio_desc);
} else {
printf("%-41s", "");
}
if (val->enabled > val->running)
printf("(%4.2f%%)",
val->running * 100.0 / val->enabled);
printf("\n");
}
}
static void profile_print_readings(void)
{
if (json_output)
profile_print_readings_json();
else
profile_print_readings_plain();
}
static char *profile_target_name(int tgt_fd)
{
struct bpf_func_info func_info;
struct bpf_prog_info info = {};
__u32 info_len = sizeof(info);
const struct btf_type *t;
__u32 func_info_rec_size;
struct btf *btf = NULL;
char *name = NULL;
int err;
err = bpf_prog_get_info_by_fd(tgt_fd, &info, &info_len);
if (err) {
p_err("failed to get info for prog FD %d", tgt_fd);
goto out;
}
if (info.btf_id == 0) {
p_err("prog FD %d doesn't have valid btf", tgt_fd);
goto out;
}
func_info_rec_size = info.func_info_rec_size;
if (info.nr_func_info == 0) {
p_err("found 0 func_info for prog FD %d", tgt_fd);
goto out;
}
memset(&info, 0, sizeof(info));
info.nr_func_info = 1;
info.func_info_rec_size = func_info_rec_size;
info.func_info = ptr_to_u64(&func_info);
err = bpf_prog_get_info_by_fd(tgt_fd, &info, &info_len);
if (err) {
p_err("failed to get func_info for prog FD %d", tgt_fd);
goto out;
}
btf = btf__load_from_kernel_by_id(info.btf_id);
if (!btf) {
p_err("failed to load btf for prog FD %d", tgt_fd);
goto out;
}
t = btf__type_by_id(btf, func_info.type_id);
if (!t) {
p_err("btf %d doesn't have type %d",
info.btf_id, func_info.type_id);
goto out;
}
name = strdup(btf__name_by_offset(btf, t->name_off));
out:
btf__free(btf);
return name;
}
static struct profiler_bpf *profile_obj;
static int profile_tgt_fd = -1;
static char *profile_tgt_name;
static int *profile_perf_events;
static int profile_perf_event_cnt;
static void profile_close_perf_events(struct profiler_bpf *obj)
{
int i;
for (i = profile_perf_event_cnt - 1; i >= 0; i--)
close(profile_perf_events[i]);
free(profile_perf_events);
profile_perf_event_cnt = 0;
}
static int profile_open_perf_event(int mid, int cpu, int map_fd)
{
int pmu_fd;
pmu_fd = syscall(__NR_perf_event_open, &metrics[mid].attr,
-1 /*pid*/, cpu, -1 /*group_fd*/, 0);
if (pmu_fd < 0) {
if (errno == ENODEV) {
p_info("cpu %d may be offline, skip %s profiling.",
cpu, metrics[mid].name);
profile_perf_event_cnt++;
return 0;
}
return -1;
}
if (bpf_map_update_elem(map_fd,
&profile_perf_event_cnt,
&pmu_fd, BPF_ANY) ||
ioctl(pmu_fd, PERF_EVENT_IOC_ENABLE, 0)) {
close(pmu_fd);
return -1;
}
profile_perf_events[profile_perf_event_cnt++] = pmu_fd;
return 0;
}
static int profile_open_perf_events(struct profiler_bpf *obj)
{
unsigned int cpu, m;
int map_fd;
profile_perf_events = calloc(
sizeof(int), obj->rodata->num_cpu * obj->rodata->num_metric);
if (!profile_perf_events) {
p_err("failed to allocate memory for perf_event array: %s",
strerror(errno));
return -1;
}
map_fd = bpf_map__fd(obj->maps.events);
if (map_fd < 0) {
p_err("failed to get fd for events map");
return -1;
}
for (m = 0; m < ARRAY_SIZE(metrics); m++) {
if (!metrics[m].selected)
continue;
for (cpu = 0; cpu < obj->rodata->num_cpu; cpu++) {
if (profile_open_perf_event(m, cpu, map_fd)) {
p_err("failed to create event %s on cpu %d",
metrics[m].name, cpu);
return -1;
}
}
}
return 0;
}
static void profile_print_and_cleanup(void)
{
profile_close_perf_events(profile_obj);
profile_read_values(profile_obj);
profile_print_readings();
profiler_bpf__destroy(profile_obj);
close(profile_tgt_fd);
free(profile_tgt_name);
}
static void int_exit(int signo)
{
profile_print_and_cleanup();
exit(0);
}
static int do_profile(int argc, char **argv)
{
int num_metric, num_cpu, err = -1;
struct bpf_program *prog;
unsigned long duration;
char *endptr;
/* we at least need two args for the prog and one metric */
if (!REQ_ARGS(3))
return -EINVAL;
/* parse target fd */
profile_tgt_fd = prog_parse_fd(&argc, &argv);
if (profile_tgt_fd < 0) {
p_err("failed to parse fd");
return -1;
}
	/* parse the optional profiling duration */
if (argc > 2 && is_prefix(argv[0], "duration")) {
NEXT_ARG();
duration = strtoul(*argv, &endptr, 0);
if (*endptr)
usage();
NEXT_ARG();
} else {
duration = UINT_MAX;
}
num_metric = profile_parse_metrics(argc, argv);
if (num_metric <= 0)
goto out;
num_cpu = libbpf_num_possible_cpus();
if (num_cpu <= 0) {
p_err("failed to identify number of CPUs");
goto out;
}
profile_obj = profiler_bpf__open();
if (!profile_obj) {
p_err("failed to open and/or load BPF object");
goto out;
}
profile_obj->rodata->num_cpu = num_cpu;
profile_obj->rodata->num_metric = num_metric;
/* adjust map sizes */
bpf_map__set_max_entries(profile_obj->maps.events, num_metric * num_cpu);
bpf_map__set_max_entries(profile_obj->maps.fentry_readings, num_metric);
bpf_map__set_max_entries(profile_obj->maps.accum_readings, num_metric);
bpf_map__set_max_entries(profile_obj->maps.counts, 1);
/* change target name */
profile_tgt_name = profile_target_name(profile_tgt_fd);
if (!profile_tgt_name)
goto out;
bpf_object__for_each_program(prog, profile_obj->obj) {
err = bpf_program__set_attach_target(prog, profile_tgt_fd,
profile_tgt_name);
if (err) {
p_err("failed to set attach target\n");
goto out;
}
}
set_max_rlimit();
err = profiler_bpf__load(profile_obj);
if (err) {
p_err("failed to load profile_obj");
goto out;
}
err = profile_open_perf_events(profile_obj);
if (err)
goto out;
err = profiler_bpf__attach(profile_obj);
if (err) {
p_err("failed to attach profile_obj");
goto out;
}
signal(SIGINT, int_exit);
sleep(duration);
profile_print_and_cleanup();
return 0;
out:
profile_close_perf_events(profile_obj);
if (profile_obj)
profiler_bpf__destroy(profile_obj);
close(profile_tgt_fd);
free(profile_tgt_name);
return err;
}
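/*
 * Example session (the program id is hypothetical):
 *
 *   # bpftool prog profile id 337 duration 10 cycles instructions
 *
 * profiles program 337 for 10 seconds, then prints run_cnt, the raw
 * counters and the derived insns-per-cycle ratio.
 */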
#endif /* BPFTOOL_WITHOUT_SKELETONS */
static int do_help(int argc, char **argv)
{
if (json_output) {
jsonw_null(json_wtr);
return 0;
}
fprintf(stderr,
"Usage: %1$s %2$s { show | list } [PROG]\n"
" %1$s %2$s dump xlated PROG [{ file FILE | [opcodes] [linum] [visual] }]\n"
" %1$s %2$s dump jited PROG [{ file FILE | [opcodes] [linum] }]\n"
" %1$s %2$s pin PROG FILE\n"
" %1$s %2$s { load | loadall } OBJ PATH \\\n"
" [type TYPE] [{ offload_dev | xdpmeta_dev } NAME] \\\n"
" [map { idx IDX | name NAME } MAP]\\\n"
" [pinmaps MAP_DIR]\n"
" [autoattach]\n"
" %1$s %2$s attach PROG ATTACH_TYPE [MAP]\n"
" %1$s %2$s detach PROG ATTACH_TYPE [MAP]\n"
" %1$s %2$s run PROG \\\n"
" data_in FILE \\\n"
" [data_out FILE [data_size_out L]] \\\n"
" [ctx_in FILE [ctx_out FILE [ctx_size_out M]]] \\\n"
" [repeat N]\n"
" %1$s %2$s profile PROG [duration DURATION] METRICs\n"
" %1$s %2$s tracelog\n"
" %1$s %2$s help\n"
"\n"
" " HELP_SPEC_MAP "\n"
" " HELP_SPEC_PROGRAM "\n"
" TYPE := { socket | kprobe | kretprobe | classifier | action |\n"
" tracepoint | raw_tracepoint | xdp | perf_event | cgroup/skb |\n"
" cgroup/sock | cgroup/dev | lwt_in | lwt_out | lwt_xmit |\n"
" lwt_seg6local | sockops | sk_skb | sk_msg | lirc_mode2 |\n"
" sk_reuseport | flow_dissector | cgroup/sysctl |\n"
" cgroup/bind4 | cgroup/bind6 | cgroup/post_bind4 |\n"
" cgroup/post_bind6 | cgroup/connect4 | cgroup/connect6 |\n"
" cgroup/getpeername4 | cgroup/getpeername6 |\n"
" cgroup/getsockname4 | cgroup/getsockname6 | cgroup/sendmsg4 |\n"
" cgroup/sendmsg6 | cgroup/recvmsg4 | cgroup/recvmsg6 |\n"
" cgroup/getsockopt | cgroup/setsockopt | cgroup/sock_release |\n"
" struct_ops | fentry | fexit | freplace | sk_lookup }\n"
" ATTACH_TYPE := { sk_msg_verdict | sk_skb_verdict | sk_skb_stream_verdict |\n"
" sk_skb_stream_parser | flow_dissector }\n"
" METRIC := { cycles | instructions | l1d_loads | llc_misses | itlb_misses | dtlb_misses }\n"
" " HELP_SPEC_OPTIONS " |\n"
" {-f|--bpffs} | {-m|--mapcompat} | {-n|--nomount} |\n"
" {-L|--use-loader} }\n"
"",
bin_name, argv[-2]);
return 0;
}
static const struct cmd cmds[] = {
{ "show", do_show },
{ "list", do_show },
{ "help", do_help },
{ "dump", do_dump },
{ "pin", do_pin },
{ "load", do_load },
{ "loadall", do_loadall },
{ "attach", do_attach },
{ "detach", do_detach },
{ "tracelog", do_tracelog },
{ "run", do_run },
{ "profile", do_profile },
{ 0 }
};
int do_prog(int argc, char **argv)
{
return cmd_select(cmds, argc, argv, do_help);
}
| linux-master | tools/bpf/bpftool/prog.c |
// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
/* Copyright (C) 2017-2018 Netronome Systems, Inc. */
#include <errno.h>
#include <fcntl.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <net/if.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <bpf/bpf.h>
#include <bpf/btf.h>
#include <bpf/hashmap.h>
#include "json_writer.h"
#include "main.h"
static struct hashmap *map_table;
static bool map_is_per_cpu(__u32 type)
{
return type == BPF_MAP_TYPE_PERCPU_HASH ||
type == BPF_MAP_TYPE_PERCPU_ARRAY ||
type == BPF_MAP_TYPE_LRU_PERCPU_HASH ||
type == BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE;
}
static bool map_is_map_of_maps(__u32 type)
{
return type == BPF_MAP_TYPE_ARRAY_OF_MAPS ||
type == BPF_MAP_TYPE_HASH_OF_MAPS;
}
static bool map_is_map_of_progs(__u32 type)
{
return type == BPF_MAP_TYPE_PROG_ARRAY;
}
static int map_type_from_str(const char *type)
{
const char *map_type_str;
unsigned int i;
for (i = 0; ; i++) {
map_type_str = libbpf_bpf_map_type_str(i);
if (!map_type_str)
break;
/* Don't allow prefixing in case of possible future shadowing */
if (!strcmp(map_type_str, type))
return i;
}
return -1;
}
static void *alloc_value(struct bpf_map_info *info)
{
if (map_is_per_cpu(info->type))
return malloc(round_up(info->value_size, 8) *
get_possible_cpus());
else
return malloc(info->value_size);
}
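/*
 * Values of per-CPU maps are returned by the kernel as one 8-byte
 * aligned slot per possible CPU, hence the round_up(value_size, 8)
 * stride used above and in the printers below.
 */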
static int do_dump_btf(const struct btf_dumper *d,
struct bpf_map_info *map_info, void *key,
void *value)
{
__u32 value_id;
int ret = 0;
/* start of key-value pair */
jsonw_start_object(d->jw);
if (map_info->btf_key_type_id) {
jsonw_name(d->jw, "key");
ret = btf_dumper_type(d, map_info->btf_key_type_id, key);
if (ret)
goto err_end_obj;
}
value_id = map_info->btf_vmlinux_value_type_id ?
: map_info->btf_value_type_id;
if (!map_is_per_cpu(map_info->type)) {
jsonw_name(d->jw, "value");
ret = btf_dumper_type(d, value_id, value);
} else {
unsigned int i, n, step;
jsonw_name(d->jw, "values");
jsonw_start_array(d->jw);
n = get_possible_cpus();
step = round_up(map_info->value_size, 8);
for (i = 0; i < n; i++) {
jsonw_start_object(d->jw);
jsonw_int_field(d->jw, "cpu", i);
jsonw_name(d->jw, "value");
ret = btf_dumper_type(d, value_id, value + i * step);
jsonw_end_object(d->jw);
if (ret)
break;
}
jsonw_end_array(d->jw);
}
err_end_obj:
/* end of key-value pair */
jsonw_end_object(d->jw);
return ret;
}
static json_writer_t *get_btf_writer(void)
{
json_writer_t *jw = jsonw_new(stdout);
if (!jw)
return NULL;
jsonw_pretty(jw, true);
return jw;
}
static void print_entry_json(struct bpf_map_info *info, unsigned char *key,
unsigned char *value, struct btf *btf)
{
jsonw_start_object(json_wtr);
if (!map_is_per_cpu(info->type)) {
jsonw_name(json_wtr, "key");
print_hex_data_json(key, info->key_size);
jsonw_name(json_wtr, "value");
print_hex_data_json(value, info->value_size);
if (map_is_map_of_maps(info->type))
jsonw_uint_field(json_wtr, "inner_map_id",
*(unsigned int *)value);
if (btf) {
struct btf_dumper d = {
.btf = btf,
.jw = json_wtr,
.is_plain_text = false,
};
jsonw_name(json_wtr, "formatted");
do_dump_btf(&d, info, key, value);
}
} else {
unsigned int i, n, step;
n = get_possible_cpus();
step = round_up(info->value_size, 8);
jsonw_name(json_wtr, "key");
print_hex_data_json(key, info->key_size);
jsonw_name(json_wtr, "values");
jsonw_start_array(json_wtr);
for (i = 0; i < n; i++) {
jsonw_start_object(json_wtr);
jsonw_int_field(json_wtr, "cpu", i);
jsonw_name(json_wtr, "value");
print_hex_data_json(value + i * step,
info->value_size);
jsonw_end_object(json_wtr);
}
jsonw_end_array(json_wtr);
if (btf) {
struct btf_dumper d = {
.btf = btf,
.jw = json_wtr,
.is_plain_text = false,
};
jsonw_name(json_wtr, "formatted");
do_dump_btf(&d, info, key, value);
}
}
jsonw_end_object(json_wtr);
}
static void
print_entry_error_msg(struct bpf_map_info *info, unsigned char *key,
const char *error_msg)
{
int msg_size = strlen(error_msg);
bool single_line, break_names;
break_names = info->key_size > 16 || msg_size > 16;
single_line = info->key_size + msg_size <= 24 && !break_names;
printf("key:%c", break_names ? '\n' : ' ');
fprint_hex(stdout, key, info->key_size, " ");
printf(single_line ? " " : "\n");
printf("value:%c%s", break_names ? '\n' : ' ', error_msg);
printf("\n");
}
static void
print_entry_error(struct bpf_map_info *map_info, void *key, int lookup_errno)
{
/* For prog_array maps or arrays of maps, failure to lookup the value
* means there is no entry for that key. Do not print an error message
* in that case.
*/
if ((map_is_map_of_maps(map_info->type) ||
map_is_map_of_progs(map_info->type)) && lookup_errno == ENOENT)
return;
if (json_output) {
jsonw_start_object(json_wtr); /* entry */
jsonw_name(json_wtr, "key");
print_hex_data_json(key, map_info->key_size);
jsonw_name(json_wtr, "value");
jsonw_start_object(json_wtr); /* error */
jsonw_string_field(json_wtr, "error", strerror(lookup_errno));
jsonw_end_object(json_wtr); /* error */
jsonw_end_object(json_wtr); /* entry */
} else {
const char *msg = NULL;
if (lookup_errno == ENOENT)
msg = "<no entry>";
else if (lookup_errno == ENOSPC &&
map_info->type == BPF_MAP_TYPE_REUSEPORT_SOCKARRAY)
msg = "<cannot read>";
print_entry_error_msg(map_info, key,
msg ? : strerror(lookup_errno));
}
}
static void print_entry_plain(struct bpf_map_info *info, unsigned char *key,
unsigned char *value)
{
if (!map_is_per_cpu(info->type)) {
bool single_line, break_names;
break_names = info->key_size > 16 || info->value_size > 16;
single_line = info->key_size + info->value_size <= 24 &&
!break_names;
if (info->key_size) {
printf("key:%c", break_names ? '\n' : ' ');
fprint_hex(stdout, key, info->key_size, " ");
printf(single_line ? " " : "\n");
}
if (info->value_size) {
if (map_is_map_of_maps(info->type)) {
printf("inner_map_id:%c", break_names ? '\n' : ' ');
printf("%u ", *(unsigned int *)value);
} else {
printf("value:%c", break_names ? '\n' : ' ');
fprint_hex(stdout, value, info->value_size, " ");
}
}
printf("\n");
} else {
unsigned int i, n, step;
n = get_possible_cpus();
step = round_up(info->value_size, 8);
if (info->key_size) {
printf("key:\n");
fprint_hex(stdout, key, info->key_size, " ");
printf("\n");
}
if (info->value_size) {
for (i = 0; i < n; i++) {
printf("value (CPU %02d):%c",
i, info->value_size > 16 ? '\n' : ' ');
fprint_hex(stdout, value + i * step,
info->value_size, " ");
printf("\n");
}
}
}
}
static char **parse_bytes(char **argv, const char *name, unsigned char *val,
unsigned int n)
{
unsigned int i = 0, base = 0;
char *endptr;
if (is_prefix(*argv, "hex")) {
base = 16;
argv++;
}
while (i < n && argv[i]) {
val[i] = strtoul(argv[i], &endptr, base);
if (*endptr) {
p_err("error parsing byte: %s", argv[i]);
return NULL;
}
i++;
}
if (i != n) {
p_err("%s expected %d bytes got %d", name, n, i);
return NULL;
}
return argv + i;
}
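/*
 * Example (made-up bytes): "key hex 0a 14 1e 28" yields four bytes
 * parsed base-16; without the leading "hex" keyword strtoul() runs
 * with base 0, so "10" is ten and "0x10" is sixteen.
 */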
/* on per-CPU maps we must copy the provided value to all per-CPU value instances */
static void fill_per_cpu_value(struct bpf_map_info *info, void *value)
{
unsigned int i, n, step;
if (!map_is_per_cpu(info->type))
return;
n = get_possible_cpus();
step = round_up(info->value_size, 8);
for (i = 1; i < n; i++)
memcpy(value + i * step, value, info->value_size);
}
static int parse_elem(char **argv, struct bpf_map_info *info,
void *key, void *value, __u32 key_size, __u32 value_size,
__u32 *flags, __u32 **value_fd)
{
if (!*argv) {
if (!key && !value)
return 0;
p_err("did not find %s", key ? "key" : "value");
return -1;
}
if (is_prefix(*argv, "key")) {
if (!key) {
if (key_size)
p_err("duplicate key");
else
p_err("unnecessary key");
return -1;
}
argv = parse_bytes(argv + 1, "key", key, key_size);
if (!argv)
return -1;
return parse_elem(argv, info, NULL, value, key_size, value_size,
flags, value_fd);
} else if (is_prefix(*argv, "value")) {
int fd;
if (!value) {
if (value_size)
p_err("duplicate value");
else
p_err("unnecessary value");
return -1;
}
argv++;
if (map_is_map_of_maps(info->type)) {
int argc = 2;
if (value_size != 4) {
p_err("value smaller than 4B for map in map?");
return -1;
}
if (!argv[0] || !argv[1]) {
p_err("not enough value arguments for map in map");
return -1;
}
fd = map_parse_fd(&argc, &argv);
if (fd < 0)
return -1;
*value_fd = value;
**value_fd = fd;
} else if (map_is_map_of_progs(info->type)) {
int argc = 2;
if (value_size != 4) {
p_err("value smaller than 4B for map of progs?");
return -1;
}
if (!argv[0] || !argv[1]) {
p_err("not enough value arguments for map of progs");
return -1;
}
if (is_prefix(*argv, "id"))
p_info("Warning: updating program array via MAP_ID, make sure this map is kept open\n"
" by some process or pinned otherwise update will be lost");
fd = prog_parse_fd(&argc, &argv);
if (fd < 0)
return -1;
*value_fd = value;
**value_fd = fd;
} else {
argv = parse_bytes(argv, "value", value, value_size);
if (!argv)
return -1;
fill_per_cpu_value(info, value);
}
return parse_elem(argv, info, key, NULL, key_size, value_size,
flags, NULL);
} else if (is_prefix(*argv, "any") || is_prefix(*argv, "noexist") ||
is_prefix(*argv, "exist")) {
if (!flags) {
p_err("flags specified multiple times: %s", *argv);
return -1;
}
if (is_prefix(*argv, "any"))
*flags = BPF_ANY;
else if (is_prefix(*argv, "noexist"))
*flags = BPF_NOEXIST;
else if (is_prefix(*argv, "exist"))
*flags = BPF_EXIST;
return parse_elem(argv + 1, info, key, value, key_size,
value_size, NULL, value_fd);
}
p_err("expected key or value, got: %s", *argv);
return -1;
}
static void show_map_header_json(struct bpf_map_info *info, json_writer_t *wtr)
{
const char *map_type_str;
jsonw_uint_field(wtr, "id", info->id);
map_type_str = libbpf_bpf_map_type_str(info->type);
if (map_type_str)
jsonw_string_field(wtr, "type", map_type_str);
else
jsonw_uint_field(wtr, "type", info->type);
if (*info->name)
jsonw_string_field(wtr, "name", info->name);
jsonw_name(wtr, "flags");
jsonw_printf(wtr, "%d", info->map_flags);
}
static int show_map_close_json(int fd, struct bpf_map_info *info)
{
char *memlock, *frozen_str;
int frozen = 0;
memlock = get_fdinfo(fd, "memlock");
frozen_str = get_fdinfo(fd, "frozen");
jsonw_start_object(json_wtr);
show_map_header_json(info, json_wtr);
print_dev_json(info->ifindex, info->netns_dev, info->netns_ino);
jsonw_uint_field(json_wtr, "bytes_key", info->key_size);
jsonw_uint_field(json_wtr, "bytes_value", info->value_size);
jsonw_uint_field(json_wtr, "max_entries", info->max_entries);
if (memlock)
jsonw_int_field(json_wtr, "bytes_memlock", atoll(memlock));
free(memlock);
if (info->type == BPF_MAP_TYPE_PROG_ARRAY) {
char *owner_prog_type = get_fdinfo(fd, "owner_prog_type");
char *owner_jited = get_fdinfo(fd, "owner_jited");
if (owner_prog_type) {
unsigned int prog_type = atoi(owner_prog_type);
const char *prog_type_str;
prog_type_str = libbpf_bpf_prog_type_str(prog_type);
if (prog_type_str)
jsonw_string_field(json_wtr, "owner_prog_type",
prog_type_str);
else
jsonw_uint_field(json_wtr, "owner_prog_type",
prog_type);
}
if (owner_jited)
jsonw_bool_field(json_wtr, "owner_jited",
!!atoi(owner_jited));
free(owner_prog_type);
free(owner_jited);
}
close(fd);
if (frozen_str) {
frozen = atoi(frozen_str);
free(frozen_str);
}
jsonw_int_field(json_wtr, "frozen", frozen);
if (info->btf_id)
jsonw_int_field(json_wtr, "btf_id", info->btf_id);
if (!hashmap__empty(map_table)) {
struct hashmap_entry *entry;
jsonw_name(json_wtr, "pinned");
jsonw_start_array(json_wtr);
hashmap__for_each_key_entry(map_table, entry, info->id)
jsonw_string(json_wtr, entry->pvalue);
jsonw_end_array(json_wtr);
}
emit_obj_refs_json(refs_table, info->id, json_wtr);
jsonw_end_object(json_wtr);
return 0;
}
static void show_map_header_plain(struct bpf_map_info *info)
{
const char *map_type_str;
printf("%u: ", info->id);
map_type_str = libbpf_bpf_map_type_str(info->type);
if (map_type_str)
printf("%s ", map_type_str);
else
printf("type %u ", info->type);
if (*info->name)
printf("name %s ", info->name);
printf("flags 0x%x", info->map_flags);
print_dev_plain(info->ifindex, info->netns_dev, info->netns_ino);
printf("\n");
}
static int show_map_close_plain(int fd, struct bpf_map_info *info)
{
char *memlock, *frozen_str;
int frozen = 0;
memlock = get_fdinfo(fd, "memlock");
frozen_str = get_fdinfo(fd, "frozen");
show_map_header_plain(info);
printf("\tkey %uB value %uB max_entries %u",
info->key_size, info->value_size, info->max_entries);
if (memlock)
printf(" memlock %sB", memlock);
free(memlock);
if (info->type == BPF_MAP_TYPE_PROG_ARRAY) {
char *owner_prog_type = get_fdinfo(fd, "owner_prog_type");
char *owner_jited = get_fdinfo(fd, "owner_jited");
if (owner_prog_type || owner_jited)
printf("\n\t");
if (owner_prog_type) {
unsigned int prog_type = atoi(owner_prog_type);
const char *prog_type_str;
prog_type_str = libbpf_bpf_prog_type_str(prog_type);
if (prog_type_str)
printf("owner_prog_type %s ", prog_type_str);
else
printf("owner_prog_type %d ", prog_type);
}
if (owner_jited)
printf("owner%s jited",
atoi(owner_jited) ? "" : " not");
free(owner_prog_type);
free(owner_jited);
}
close(fd);
if (!hashmap__empty(map_table)) {
struct hashmap_entry *entry;
hashmap__for_each_key_entry(map_table, entry, info->id)
printf("\n\tpinned %s", (char *)entry->pvalue);
}
if (frozen_str) {
frozen = atoi(frozen_str);
free(frozen_str);
}
if (info->btf_id || frozen)
printf("\n\t");
if (info->btf_id)
printf("btf_id %d", info->btf_id);
if (frozen)
printf("%sfrozen", info->btf_id ? " " : "");
emit_obj_refs_plain(refs_table, info->id, "\n\tpids ");
printf("\n");
return 0;
}
static int do_show_subset(int argc, char **argv)
{
struct bpf_map_info info = {};
__u32 len = sizeof(info);
int *fds = NULL;
int nb_fds, i;
int err = -1;
fds = malloc(sizeof(int));
if (!fds) {
p_err("mem alloc failed");
return -1;
}
nb_fds = map_parse_fds(&argc, &argv, &fds);
if (nb_fds < 1)
goto exit_free;
if (json_output && nb_fds > 1)
jsonw_start_array(json_wtr); /* root array */
for (i = 0; i < nb_fds; i++) {
err = bpf_map_get_info_by_fd(fds[i], &info, &len);
if (err) {
p_err("can't get map info: %s",
strerror(errno));
for (; i < nb_fds; i++)
close(fds[i]);
break;
}
if (json_output)
show_map_close_json(fds[i], &info);
else
show_map_close_plain(fds[i], &info);
close(fds[i]);
}
if (json_output && nb_fds > 1)
jsonw_end_array(json_wtr); /* root array */
exit_free:
free(fds);
return err;
}
static int do_show(int argc, char **argv)
{
struct bpf_map_info info = {};
__u32 len = sizeof(info);
__u32 id = 0;
int err;
int fd;
if (show_pinned) {
map_table = hashmap__new(hash_fn_for_key_as_id,
equal_fn_for_key_as_id, NULL);
if (IS_ERR(map_table)) {
p_err("failed to create hashmap for pinned paths");
return -1;
}
build_pinned_obj_table(map_table, BPF_OBJ_MAP);
}
build_obj_refs_table(&refs_table, BPF_OBJ_MAP);
if (argc == 2)
return do_show_subset(argc, argv);
if (argc)
return BAD_ARG();
if (json_output)
jsonw_start_array(json_wtr);
while (true) {
err = bpf_map_get_next_id(id, &id);
if (err) {
if (errno == ENOENT)
break;
p_err("can't get next map: %s%s", strerror(errno),
errno == EINVAL ? " -- kernel too old?" : "");
break;
}
fd = bpf_map_get_fd_by_id(id);
if (fd < 0) {
if (errno == ENOENT)
continue;
p_err("can't get map by id (%u): %s",
id, strerror(errno));
break;
}
err = bpf_map_get_info_by_fd(fd, &info, &len);
if (err) {
p_err("can't get map info: %s", strerror(errno));
close(fd);
break;
}
if (json_output)
show_map_close_json(fd, &info);
else
show_map_close_plain(fd, &info);
}
if (json_output)
jsonw_end_array(json_wtr);
delete_obj_refs_table(refs_table);
if (show_pinned)
delete_pinned_obj_table(map_table);
return errno == ENOENT ? 0 : -1;
}
static int dump_map_elem(int fd, void *key, void *value,
struct bpf_map_info *map_info, struct btf *btf,
json_writer_t *btf_wtr)
{
if (bpf_map_lookup_elem(fd, key, value)) {
print_entry_error(map_info, key, errno);
return -1;
}
if (json_output) {
print_entry_json(map_info, key, value, btf);
} else if (btf) {
struct btf_dumper d = {
.btf = btf,
.jw = btf_wtr,
.is_plain_text = true,
};
do_dump_btf(&d, map_info, key, value);
} else {
print_entry_plain(map_info, key, value);
}
return 0;
}
static int maps_have_btf(int *fds, int nb_fds)
{
struct bpf_map_info info = {};
__u32 len = sizeof(info);
int err, i;
for (i = 0; i < nb_fds; i++) {
err = bpf_map_get_info_by_fd(fds[i], &info, &len);
if (err) {
p_err("can't get map info: %s", strerror(errno));
return -1;
}
if (!info.btf_id)
return 0;
}
return 1;
}
static struct btf *btf_vmlinux;
static int get_map_kv_btf(const struct bpf_map_info *info, struct btf **btf)
{
int err = 0;
if (info->btf_vmlinux_value_type_id) {
if (!btf_vmlinux) {
btf_vmlinux = libbpf_find_kernel_btf();
if (!btf_vmlinux) {
p_err("failed to get kernel btf");
return -errno;
}
}
*btf = btf_vmlinux;
} else if (info->btf_value_type_id) {
*btf = btf__load_from_kernel_by_id(info->btf_id);
if (!*btf) {
err = -errno;
p_err("failed to get btf");
}
} else {
*btf = NULL;
}
return err;
}
static void free_map_kv_btf(struct btf *btf)
{
if (btf != btf_vmlinux)
btf__free(btf);
}
static int
map_dump(int fd, struct bpf_map_info *info, json_writer_t *wtr,
bool show_header)
{
void *key, *value, *prev_key;
unsigned int num_elems = 0;
struct btf *btf = NULL;
int err;
key = malloc(info->key_size);
value = alloc_value(info);
if (!key || !value) {
p_err("mem alloc failed");
err = -1;
goto exit_free;
}
prev_key = NULL;
if (wtr) {
err = get_map_kv_btf(info, &btf);
if (err) {
goto exit_free;
}
if (show_header) {
jsonw_start_object(wtr); /* map object */
show_map_header_json(info, wtr);
jsonw_name(wtr, "elements");
}
jsonw_start_array(wtr); /* elements */
} else if (show_header) {
show_map_header_plain(info);
}
if (info->type == BPF_MAP_TYPE_REUSEPORT_SOCKARRAY &&
info->value_size != 8) {
const char *map_type_str;
map_type_str = libbpf_bpf_map_type_str(info->type);
p_info("Warning: cannot read values from %s map with value_size != 8",
map_type_str);
}
while (true) {
err = bpf_map_get_next_key(fd, prev_key, key);
if (err) {
if (errno == ENOENT)
err = 0;
break;
}
if (!dump_map_elem(fd, key, value, info, btf, wtr))
num_elems++;
prev_key = key;
}
if (wtr) {
jsonw_end_array(wtr); /* elements */
if (show_header)
jsonw_end_object(wtr); /* map object */
} else {
printf("Found %u element%s\n", num_elems,
num_elems != 1 ? "s" : "");
}
exit_free:
free(key);
free(value);
close(fd);
free_map_kv_btf(btf);
return err;
}
static int do_dump(int argc, char **argv)
{
json_writer_t *wtr = NULL, *btf_wtr = NULL;
struct bpf_map_info info = {};
int nb_fds, i = 0;
__u32 len = sizeof(info);
int *fds = NULL;
int err = -1;
if (argc != 2)
usage();
fds = malloc(sizeof(int));
if (!fds) {
p_err("mem alloc failed");
return -1;
}
nb_fds = map_parse_fds(&argc, &argv, &fds);
if (nb_fds < 1)
goto exit_free;
if (json_output) {
wtr = json_wtr;
} else {
int do_plain_btf;
do_plain_btf = maps_have_btf(fds, nb_fds);
if (do_plain_btf < 0)
goto exit_close;
if (do_plain_btf) {
btf_wtr = get_btf_writer();
wtr = btf_wtr;
if (!btf_wtr)
p_info("failed to create json writer for btf. falling back to plain output");
}
}
if (wtr && nb_fds > 1)
jsonw_start_array(wtr); /* root array */
for (i = 0; i < nb_fds; i++) {
if (bpf_map_get_info_by_fd(fds[i], &info, &len)) {
p_err("can't get map info: %s", strerror(errno));
break;
}
err = map_dump(fds[i], &info, wtr, nb_fds > 1);
if (!wtr && i != nb_fds - 1)
printf("\n");
if (err)
break;
close(fds[i]);
}
if (wtr && nb_fds > 1)
jsonw_end_array(wtr); /* root array */
if (btf_wtr)
jsonw_destroy(&btf_wtr);
exit_close:
for (; i < nb_fds; i++)
close(fds[i]);
exit_free:
free(fds);
btf__free(btf_vmlinux);
return err;
}
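/*
 * Example invocations (IDs and paths are hypothetical):
 *
 *   bpftool map dump id 64
 *   bpftool -j map dump pinned /sys/fs/bpf/stats
 *
 * When the maps carry BTF, plain output switches to the pretty-printed
 * BTF rendering produced above.
 */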
static int alloc_key_value(struct bpf_map_info *info, void **key, void **value)
{
*key = NULL;
*value = NULL;
if (info->key_size) {
*key = malloc(info->key_size);
if (!*key) {
p_err("key mem alloc failed");
return -1;
}
}
if (info->value_size) {
*value = alloc_value(info);
if (!*value) {
p_err("value mem alloc failed");
free(*key);
*key = NULL;
return -1;
}
}
return 0;
}
static int do_update(int argc, char **argv)
{
struct bpf_map_info info = {};
__u32 len = sizeof(info);
__u32 *value_fd = NULL;
__u32 flags = BPF_ANY;
void *key, *value;
int fd, err;
if (argc < 2)
usage();
fd = map_parse_fd_and_info(&argc, &argv, &info, &len);
if (fd < 0)
return -1;
err = alloc_key_value(&info, &key, &value);
if (err)
goto exit_free;
err = parse_elem(argv, &info, key, value, info.key_size,
info.value_size, &flags, &value_fd);
if (err)
goto exit_free;
err = bpf_map_update_elem(fd, key, value, flags);
if (err) {
p_err("update failed: %s", strerror(errno));
goto exit_free;
}
exit_free:
if (value_fd)
close(*value_fd);
free(key);
free(value);
close(fd);
if (!err && json_output)
jsonw_null(json_wtr);
return err;
}
static void print_key_value(struct bpf_map_info *info, void *key,
void *value)
{
json_writer_t *btf_wtr;
struct btf *btf;
if (get_map_kv_btf(info, &btf))
return;
if (json_output) {
print_entry_json(info, key, value, btf);
} else if (btf) {
		/* json_wtr has not been initialised if we reach this point,
		 * so create a separate writer for BTF
		 */
btf_wtr = get_btf_writer();
if (!btf_wtr) {
p_info("failed to create json writer for btf. falling back to plain output");
btf__free(btf);
btf = NULL;
print_entry_plain(info, key, value);
} else {
struct btf_dumper d = {
.btf = btf,
.jw = btf_wtr,
.is_plain_text = true,
};
do_dump_btf(&d, info, key, value);
jsonw_destroy(&btf_wtr);
}
} else {
print_entry_plain(info, key, value);
}
btf__free(btf);
}
static int do_lookup(int argc, char **argv)
{
struct bpf_map_info info = {};
__u32 len = sizeof(info);
void *key, *value;
int err;
int fd;
if (argc < 2)
usage();
fd = map_parse_fd_and_info(&argc, &argv, &info, &len);
if (fd < 0)
return -1;
err = alloc_key_value(&info, &key, &value);
if (err)
goto exit_free;
err = parse_elem(argv, &info, key, NULL, info.key_size, 0, NULL, NULL);
if (err)
goto exit_free;
err = bpf_map_lookup_elem(fd, key, value);
if (err) {
if (errno == ENOENT) {
if (json_output) {
jsonw_null(json_wtr);
} else {
printf("key:\n");
fprint_hex(stdout, key, info.key_size, " ");
printf("\n\nNot found\n");
}
} else {
p_err("lookup failed: %s", strerror(errno));
}
goto exit_free;
}
/* here means bpf_map_lookup_elem() succeeded */
print_key_value(&info, key, value);
exit_free:
free(key);
free(value);
close(fd);
return err;
}
static int do_getnext(int argc, char **argv)
{
struct bpf_map_info info = {};
__u32 len = sizeof(info);
void *key, *nextkey;
int err;
int fd;
if (argc < 2)
usage();
fd = map_parse_fd_and_info(&argc, &argv, &info, &len);
if (fd < 0)
return -1;
key = malloc(info.key_size);
nextkey = malloc(info.key_size);
if (!key || !nextkey) {
p_err("mem alloc failed");
err = -1;
goto exit_free;
}
if (argc) {
err = parse_elem(argv, &info, key, NULL, info.key_size, 0,
NULL, NULL);
if (err)
goto exit_free;
} else {
free(key);
key = NULL;
}
err = bpf_map_get_next_key(fd, key, nextkey);
if (err) {
p_err("can't get next key: %s", strerror(errno));
goto exit_free;
}
if (json_output) {
jsonw_start_object(json_wtr);
if (key) {
jsonw_name(json_wtr, "key");
print_hex_data_json(key, info.key_size);
} else {
jsonw_null_field(json_wtr, "key");
}
jsonw_name(json_wtr, "next_key");
print_hex_data_json(nextkey, info.key_size);
jsonw_end_object(json_wtr);
} else {
if (key) {
printf("key:\n");
fprint_hex(stdout, key, info.key_size, " ");
printf("\n");
} else {
printf("key: None\n");
}
printf("next key:\n");
fprint_hex(stdout, nextkey, info.key_size, " ");
printf("\n");
}
exit_free:
free(nextkey);
free(key);
close(fd);
return err;
}
static int do_delete(int argc, char **argv)
{
struct bpf_map_info info = {};
__u32 len = sizeof(info);
void *key;
int err;
int fd;
if (argc < 2)
usage();
fd = map_parse_fd_and_info(&argc, &argv, &info, &len);
if (fd < 0)
return -1;
key = malloc(info.key_size);
if (!key) {
p_err("mem alloc failed");
err = -1;
goto exit_free;
}
err = parse_elem(argv, &info, key, NULL, info.key_size, 0, NULL, NULL);
if (err)
goto exit_free;
err = bpf_map_delete_elem(fd, key);
if (err)
p_err("delete failed: %s", strerror(errno));
exit_free:
free(key);
close(fd);
if (!err && json_output)
jsonw_null(json_wtr);
return err;
}
static int do_pin(int argc, char **argv)
{
int err;
err = do_pin_any(argc, argv, map_parse_fd);
if (!err && json_output)
jsonw_null(json_wtr);
return err;
}
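/* Handler for "bpftool map create FILE type TYPE ...": accept the
 * keyword/value pairs in any order, create the map with
 * bpf_map_create() and pin it at FILE so it outlives bpftool. An
 * illustrative invocation (path and name are hypothetical):
 *   bpftool map create /sys/fs/bpf/m type hash key 4 value 8 entries 64 name m
 */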
static int do_create(int argc, char **argv)
{
LIBBPF_OPTS(bpf_map_create_opts, attr);
enum bpf_map_type map_type = BPF_MAP_TYPE_UNSPEC;
__u32 key_size = 0, value_size = 0, max_entries = 0;
const char *map_name = NULL;
const char *pinfile;
int err = -1, fd;
if (!REQ_ARGS(7))
return -1;
pinfile = GET_ARG();
while (argc) {
if (!REQ_ARGS(2))
return -1;
if (is_prefix(*argv, "type")) {
NEXT_ARG();
if (map_type) {
p_err("map type already specified");
goto exit;
}
map_type = map_type_from_str(*argv);
if ((int)map_type < 0) {
p_err("unrecognized map type: %s", *argv);
goto exit;
}
NEXT_ARG();
} else if (is_prefix(*argv, "name")) {
NEXT_ARG();
map_name = GET_ARG();
} else if (is_prefix(*argv, "key")) {
if (parse_u32_arg(&argc, &argv, &key_size,
"key size"))
goto exit;
} else if (is_prefix(*argv, "value")) {
if (parse_u32_arg(&argc, &argv, &value_size,
"value size"))
goto exit;
} else if (is_prefix(*argv, "entries")) {
if (parse_u32_arg(&argc, &argv, &max_entries,
"max entries"))
goto exit;
} else if (is_prefix(*argv, "flags")) {
if (parse_u32_arg(&argc, &argv, &attr.map_flags,
"flags"))
goto exit;
} else if (is_prefix(*argv, "dev")) {
p_info("Warning: 'bpftool map create [...] dev <ifname>' syntax is deprecated.\n"
"Going further, please use 'offload_dev <ifname>' to request hardware offload for the map.");
goto offload_dev;
} else if (is_prefix(*argv, "offload_dev")) {
offload_dev:
NEXT_ARG();
if (attr.map_ifindex) {
p_err("offload device already specified");
goto exit;
}
attr.map_ifindex = if_nametoindex(*argv);
if (!attr.map_ifindex) {
p_err("unrecognized netdevice '%s': %s",
*argv, strerror(errno));
goto exit;
}
NEXT_ARG();
} else if (is_prefix(*argv, "inner_map")) {
struct bpf_map_info info = {};
__u32 len = sizeof(info);
int inner_map_fd;
NEXT_ARG();
if (!REQ_ARGS(2))
usage();
inner_map_fd = map_parse_fd_and_info(&argc, &argv,
&info, &len);
if (inner_map_fd < 0)
return -1;
attr.inner_map_fd = inner_map_fd;
} else {
p_err("unknown arg %s", *argv);
goto exit;
}
}
if (!map_name) {
p_err("map name not specified");
goto exit;
}
set_max_rlimit();
fd = bpf_map_create(map_type, map_name, key_size, value_size, max_entries, &attr);
if (fd < 0) {
p_err("map create failed: %s", strerror(errno));
goto exit;
}
err = do_pin_fd(fd, pinfile);
close(fd);
if (err)
goto exit;
if (json_output)
jsonw_null(json_wtr);
exit:
if (attr.inner_map_fd > 0)
close(attr.inner_map_fd);
return err;
}
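/* Shared handler for "pop" (stack maps) and "dequeue" (queue maps):
 * both translate to bpf_map_lookup_and_delete_elem() with a NULL key,
 * since these map types are keyless (key_size is 0).
 */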
static int do_pop_dequeue(int argc, char **argv)
{
struct bpf_map_info info = {};
__u32 len = sizeof(info);
void *key, *value;
int err;
int fd;
if (argc < 2)
usage();
fd = map_parse_fd_and_info(&argc, &argv, &info, &len);
if (fd < 0)
return -1;
err = alloc_key_value(&info, &key, &value);
if (err)
goto exit_free;
err = bpf_map_lookup_and_delete_elem(fd, key, value);
if (err) {
if (errno == ENOENT) {
if (json_output)
jsonw_null(json_wtr);
else
printf("Error: empty map\n");
} else {
p_err("pop failed: %s", strerror(errno));
}
goto exit_free;
}
print_key_value(&info, key, value);
exit_free:
free(key);
free(value);
close(fd);
return err;
}
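/* "bpftool map freeze MAP": make the map read-only from user space via
 * bpf_map_freeze(); BPF programs may still modify it.
 */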
static int do_freeze(int argc, char **argv)
{
int err, fd;
if (!REQ_ARGS(2))
return -1;
fd = map_parse_fd(&argc, &argv);
if (fd < 0)
return -1;
if (argc) {
close(fd);
return BAD_ARG();
}
err = bpf_map_freeze(fd);
close(fd);
if (err) {
p_err("failed to freeze map: %s", strerror(errno));
return err;
}
if (json_output)
jsonw_null(json_wtr);
return 0;
}
static int do_help(int argc, char **argv)
{
if (json_output) {
jsonw_null(json_wtr);
return 0;
}
fprintf(stderr,
"Usage: %1$s %2$s { show | list } [MAP]\n"
" %1$s %2$s create FILE type TYPE key KEY_SIZE value VALUE_SIZE \\\n"
" entries MAX_ENTRIES name NAME [flags FLAGS] \\\n"
" [inner_map MAP] [offload_dev NAME]\n"
" %1$s %2$s dump MAP\n"
" %1$s %2$s update MAP [key DATA] [value VALUE] [UPDATE_FLAGS]\n"
" %1$s %2$s lookup MAP [key DATA]\n"
" %1$s %2$s getnext MAP [key DATA]\n"
" %1$s %2$s delete MAP key DATA\n"
" %1$s %2$s pin MAP FILE\n"
" %1$s %2$s event_pipe MAP [cpu N index M]\n"
" %1$s %2$s peek MAP\n"
" %1$s %2$s push MAP value VALUE\n"
" %1$s %2$s pop MAP\n"
" %1$s %2$s enqueue MAP value VALUE\n"
" %1$s %2$s dequeue MAP\n"
" %1$s %2$s freeze MAP\n"
" %1$s %2$s help\n"
"\n"
" " HELP_SPEC_MAP "\n"
" DATA := { [hex] BYTES }\n"
" " HELP_SPEC_PROGRAM "\n"
" VALUE := { DATA | MAP | PROG }\n"
" UPDATE_FLAGS := { any | exist | noexist }\n"
" TYPE := { hash | array | prog_array | perf_event_array | percpu_hash |\n"
" percpu_array | stack_trace | cgroup_array | lru_hash |\n"
" lru_percpu_hash | lpm_trie | array_of_maps | hash_of_maps |\n"
" devmap | devmap_hash | sockmap | cpumap | xskmap | sockhash |\n"
" cgroup_storage | reuseport_sockarray | percpu_cgroup_storage |\n"
" queue | stack | sk_storage | struct_ops | ringbuf | inode_storage |\n"
" task_storage | bloom_filter | user_ringbuf | cgrp_storage }\n"
" " HELP_SPEC_OPTIONS " |\n"
" {-f|--bpffs} | {-n|--nomount} }\n"
"",
bin_name, argv[-2]);
return 0;
}
static const struct cmd cmds[] = {
{ "show", do_show },
{ "list", do_show },
{ "help", do_help },
{ "dump", do_dump },
{ "update", do_update },
{ "lookup", do_lookup },
{ "getnext", do_getnext },
{ "delete", do_delete },
{ "pin", do_pin },
{ "event_pipe", do_event_pipe },
{ "create", do_create },
{ "peek", do_lookup },
{ "push", do_update },
{ "enqueue", do_update },
{ "pop", do_pop_dequeue },
{ "dequeue", do_pop_dequeue },
{ "freeze", do_freeze },
{ 0 }
};
int do_map(int argc, char **argv)
{
return cmd_select(cmds, argc, argv, do_help);
}
| linux-master | tools/bpf/bpftool/map.c |
// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
// Copyright (C) 2020 Facebook
#ifndef _GNU_SOURCE
#define _GNU_SOURCE
#endif
#include <errno.h>
#include <unistd.h>
#include <linux/err.h>
#include <bpf/libbpf.h>
#include "main.h"
static int do_pin(int argc, char **argv)
{
DECLARE_LIBBPF_OPTS(bpf_iter_attach_opts, iter_opts);
union bpf_iter_link_info linfo;
const char *objfile, *path;
struct bpf_program *prog;
struct bpf_object *obj;
struct bpf_link *link;
int err = -1, map_fd = -1;
if (!REQ_ARGS(2))
usage();
objfile = GET_ARG();
path = GET_ARG();
/* optional arguments */
if (argc) {
if (is_prefix(*argv, "map")) {
NEXT_ARG();
if (!REQ_ARGS(2)) {
p_err("incorrect map spec");
return -1;
}
map_fd = map_parse_fd(&argc, &argv);
if (map_fd < 0)
return -1;
memset(&linfo, 0, sizeof(linfo));
linfo.map.map_fd = map_fd;
iter_opts.link_info = &linfo;
iter_opts.link_info_len = sizeof(linfo);
}
}
obj = bpf_object__open(objfile);
if (!obj) {
err = -errno;
p_err("can't open objfile %s", objfile);
goto close_map_fd;
}
err = bpf_object__load(obj);
if (err) {
p_err("can't load objfile %s", objfile);
goto close_obj;
}
prog = bpf_object__next_program(obj, NULL);
if (!prog) {
err = -errno;
p_err("can't find bpf program in objfile %s", objfile);
goto close_obj;
}
link = bpf_program__attach_iter(prog, &iter_opts);
if (!link) {
err = -errno;
p_err("attach_iter failed for program %s",
bpf_program__name(prog));
goto close_obj;
}
err = mount_bpffs_for_pin(path, false);
if (err)
goto close_link;
err = bpf_link__pin(link, path);
if (err) {
p_err("pin_iter failed for program %s to path %s",
bpf_program__name(prog), path);
goto close_link;
}
close_link:
bpf_link__destroy(link);
close_obj:
bpf_object__close(obj);
close_map_fd:
if (map_fd >= 0)
close(map_fd);
return err;
}
static int do_help(int argc, char **argv)
{
fprintf(stderr,
"Usage: %1$s %2$s pin OBJ PATH [map MAP]\n"
" %1$s %2$s help\n"
"\n"
" " HELP_SPEC_MAP "\n"
" " HELP_SPEC_OPTIONS " }\n"
"",
bin_name, "iter");
return 0;
}
static const struct cmd cmds[] = {
{ "help", do_help },
{ "pin", do_pin },
{ 0 }
};
int do_iter(int argc, char **argv)
{
return cmd_select(cmds, argc, argv, do_help);
}
| linux-master | tools/bpf/bpftool/iter.c |
// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
// Copyright (C) 2018 Facebook
#ifndef _GNU_SOURCE
#define _GNU_SOURCE
#endif
#include <errno.h>
#include <fcntl.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>
#include <unistd.h>
#include <bpf/bpf.h>
#include <bpf/libbpf.h>
#include <net/if.h>
#include <linux/rtnetlink.h>
#include <linux/socket.h>
#include <linux/tc_act/tc_bpf.h>
#include <sys/socket.h>
#include <sys/stat.h>
#include <sys/types.h>
#include "bpf/nlattr.h"
#include "main.h"
#include "netlink_dumper.h"
#ifndef SOL_NETLINK
#define SOL_NETLINK 270
#endif
struct ip_devname_ifindex {
char devname[64];
int ifindex;
};
struct bpf_netdev_t {
struct ip_devname_ifindex *devices;
int used_len;
int array_len;
int filter_idx;
};
struct tc_kind_handle {
char kind[64];
int handle;
};
struct bpf_tcinfo_t {
struct tc_kind_handle *handle_array;
int used_len;
int array_len;
bool is_qdisc;
};
struct bpf_filter_t {
const char *kind;
const char *devname;
int ifindex;
};
struct bpf_attach_info {
__u32 flow_dissector_id;
};
enum net_attach_type {
NET_ATTACH_TYPE_XDP,
NET_ATTACH_TYPE_XDP_GENERIC,
NET_ATTACH_TYPE_XDP_DRIVER,
NET_ATTACH_TYPE_XDP_OFFLOAD,
};
static const char * const attach_type_strings[] = {
[NET_ATTACH_TYPE_XDP] = "xdp",
[NET_ATTACH_TYPE_XDP_GENERIC] = "xdpgeneric",
[NET_ATTACH_TYPE_XDP_DRIVER] = "xdpdrv",
[NET_ATTACH_TYPE_XDP_OFFLOAD] = "xdpoffload",
};
static const char * const attach_loc_strings[] = {
[BPF_TCX_INGRESS] = "tcx/ingress",
[BPF_TCX_EGRESS] = "tcx/egress",
};
const size_t net_attach_type_size = ARRAY_SIZE(attach_type_strings);
static enum net_attach_type parse_attach_type(const char *str)
{
enum net_attach_type type;
for (type = 0; type < net_attach_type_size; type++) {
if (attach_type_strings[type] &&
is_prefix(str, attach_type_strings[type]))
return type;
}
return net_attach_type_size;
}
typedef int (*dump_nlmsg_t)(void *cookie, void *msg, struct nlattr **tb);
typedef int (*__dump_nlmsg_t)(struct nlmsghdr *nlmsg, dump_nlmsg_t, void *cookie);
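/* Open a NETLINK_ROUTE socket, request extended ACK error reporting,
 * and return the kernel-assigned address (nl_pid) through *nl_pid so
 * replies can be validated in netlink_recv().
 */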
static int netlink_open(__u32 *nl_pid)
{
struct sockaddr_nl sa;
socklen_t addrlen;
int one = 1, ret;
int sock;
memset(&sa, 0, sizeof(sa));
sa.nl_family = AF_NETLINK;
sock = socket(AF_NETLINK, SOCK_RAW, NETLINK_ROUTE);
if (sock < 0)
return -errno;
if (setsockopt(sock, SOL_NETLINK, NETLINK_EXT_ACK,
&one, sizeof(one)) < 0) {
p_err("Netlink error reporting not supported");
}
if (bind(sock, (struct sockaddr *)&sa, sizeof(sa)) < 0) {
ret = -errno;
goto cleanup;
}
addrlen = sizeof(sa);
if (getsockname(sock, (struct sockaddr *)&sa, &addrlen) < 0) {
ret = -errno;
goto cleanup;
}
if (addrlen != sizeof(sa)) {
ret = -LIBBPF_ERRNO__INTERNAL;
goto cleanup;
}
*nl_pid = sa.nl_pid;
return sock;
cleanup:
close(sock);
return ret;
}
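/* Receive loop for netlink dump replies: keep reading while messages
 * carry NLM_F_MULTI, validate pid and sequence number, surface
 * NLMSG_ERROR payloads, stop on NLMSG_DONE and hand every other
 * message to the _fn callback.
 */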
static int netlink_recv(int sock, __u32 nl_pid, __u32 seq,
__dump_nlmsg_t _fn, dump_nlmsg_t fn,
void *cookie)
{
bool multipart = true;
struct nlmsgerr *err;
struct nlmsghdr *nh;
char buf[4096];
int len, ret;
while (multipart) {
multipart = false;
len = recv(sock, buf, sizeof(buf), 0);
if (len < 0) {
ret = -errno;
goto done;
}
if (len == 0)
break;
for (nh = (struct nlmsghdr *)buf; NLMSG_OK(nh, (unsigned int)len);
nh = NLMSG_NEXT(nh, len)) {
if (nh->nlmsg_pid != nl_pid) {
ret = -LIBBPF_ERRNO__WRNGPID;
goto done;
}
if (nh->nlmsg_seq != seq) {
ret = -LIBBPF_ERRNO__INVSEQ;
goto done;
}
if (nh->nlmsg_flags & NLM_F_MULTI)
multipart = true;
switch (nh->nlmsg_type) {
case NLMSG_ERROR:
err = (struct nlmsgerr *)NLMSG_DATA(nh);
if (!err->error)
continue;
ret = err->error;
libbpf_nla_dump_errormsg(nh);
goto done;
case NLMSG_DONE:
return 0;
default:
break;
}
if (_fn) {
ret = _fn(nh, fn, cookie);
if (ret)
return ret;
}
}
}
ret = 0;
done:
return ret;
}
static int __dump_class_nlmsg(struct nlmsghdr *nlh,
dump_nlmsg_t dump_class_nlmsg,
void *cookie)
{
struct nlattr *tb[TCA_MAX + 1], *attr;
struct tcmsg *t = NLMSG_DATA(nlh);
int len;
len = nlh->nlmsg_len - NLMSG_LENGTH(sizeof(*t));
attr = (struct nlattr *) ((void *) t + NLMSG_ALIGN(sizeof(*t)));
if (libbpf_nla_parse(tb, TCA_MAX, attr, len, NULL) != 0)
return -LIBBPF_ERRNO__NLPARSE;
return dump_class_nlmsg(cookie, t, tb);
}
static int netlink_get_class(int sock, unsigned int nl_pid, int ifindex,
dump_nlmsg_t dump_class_nlmsg, void *cookie)
{
struct {
struct nlmsghdr nlh;
struct tcmsg t;
} req = {
.nlh.nlmsg_len = NLMSG_LENGTH(sizeof(struct tcmsg)),
.nlh.nlmsg_type = RTM_GETTCLASS,
.nlh.nlmsg_flags = NLM_F_DUMP | NLM_F_REQUEST,
.t.tcm_family = AF_UNSPEC,
.t.tcm_ifindex = ifindex,
};
int seq = time(NULL);
req.nlh.nlmsg_seq = seq;
if (send(sock, &req, req.nlh.nlmsg_len, 0) < 0)
return -errno;
return netlink_recv(sock, nl_pid, seq, __dump_class_nlmsg,
dump_class_nlmsg, cookie);
}
static int __dump_qdisc_nlmsg(struct nlmsghdr *nlh,
dump_nlmsg_t dump_qdisc_nlmsg,
void *cookie)
{
struct nlattr *tb[TCA_MAX + 1], *attr;
struct tcmsg *t = NLMSG_DATA(nlh);
int len;
len = nlh->nlmsg_len - NLMSG_LENGTH(sizeof(*t));
attr = (struct nlattr *) ((void *) t + NLMSG_ALIGN(sizeof(*t)));
if (libbpf_nla_parse(tb, TCA_MAX, attr, len, NULL) != 0)
return -LIBBPF_ERRNO__NLPARSE;
return dump_qdisc_nlmsg(cookie, t, tb);
}
static int netlink_get_qdisc(int sock, unsigned int nl_pid, int ifindex,
dump_nlmsg_t dump_qdisc_nlmsg, void *cookie)
{
struct {
struct nlmsghdr nlh;
struct tcmsg t;
} req = {
.nlh.nlmsg_len = NLMSG_LENGTH(sizeof(struct tcmsg)),
.nlh.nlmsg_type = RTM_GETQDISC,
.nlh.nlmsg_flags = NLM_F_DUMP | NLM_F_REQUEST,
.t.tcm_family = AF_UNSPEC,
.t.tcm_ifindex = ifindex,
};
int seq = time(NULL);
req.nlh.nlmsg_seq = seq;
if (send(sock, &req, req.nlh.nlmsg_len, 0) < 0)
return -errno;
return netlink_recv(sock, nl_pid, seq, __dump_qdisc_nlmsg,
dump_qdisc_nlmsg, cookie);
}
static int __dump_filter_nlmsg(struct nlmsghdr *nlh,
dump_nlmsg_t dump_filter_nlmsg,
void *cookie)
{
struct nlattr *tb[TCA_MAX + 1], *attr;
struct tcmsg *t = NLMSG_DATA(nlh);
int len;
len = nlh->nlmsg_len - NLMSG_LENGTH(sizeof(*t));
attr = (struct nlattr *) ((void *) t + NLMSG_ALIGN(sizeof(*t)));
if (libbpf_nla_parse(tb, TCA_MAX, attr, len, NULL) != 0)
return -LIBBPF_ERRNO__NLPARSE;
return dump_filter_nlmsg(cookie, t, tb);
}
static int netlink_get_filter(int sock, unsigned int nl_pid, int ifindex, int handle,
dump_nlmsg_t dump_filter_nlmsg, void *cookie)
{
struct {
struct nlmsghdr nlh;
struct tcmsg t;
} req = {
.nlh.nlmsg_len = NLMSG_LENGTH(sizeof(struct tcmsg)),
.nlh.nlmsg_type = RTM_GETTFILTER,
.nlh.nlmsg_flags = NLM_F_DUMP | NLM_F_REQUEST,
.t.tcm_family = AF_UNSPEC,
.t.tcm_ifindex = ifindex,
.t.tcm_parent = handle,
};
int seq = time(NULL);
req.nlh.nlmsg_seq = seq;
if (send(sock, &req, req.nlh.nlmsg_len, 0) < 0)
return -errno;
return netlink_recv(sock, nl_pid, seq, __dump_filter_nlmsg,
dump_filter_nlmsg, cookie);
}
static int __dump_link_nlmsg(struct nlmsghdr *nlh,
dump_nlmsg_t dump_link_nlmsg, void *cookie)
{
struct nlattr *tb[IFLA_MAX + 1], *attr;
struct ifinfomsg *ifi = NLMSG_DATA(nlh);
int len;
len = nlh->nlmsg_len - NLMSG_LENGTH(sizeof(*ifi));
attr = (struct nlattr *) ((void *) ifi + NLMSG_ALIGN(sizeof(*ifi)));
if (libbpf_nla_parse(tb, IFLA_MAX, attr, len, NULL) != 0)
return -LIBBPF_ERRNO__NLPARSE;
return dump_link_nlmsg(cookie, ifi, tb);
}
static int netlink_get_link(int sock, unsigned int nl_pid,
dump_nlmsg_t dump_link_nlmsg, void *cookie)
{
struct {
struct nlmsghdr nlh;
struct ifinfomsg ifm;
} req = {
.nlh.nlmsg_len = NLMSG_LENGTH(sizeof(struct ifinfomsg)),
.nlh.nlmsg_type = RTM_GETLINK,
.nlh.nlmsg_flags = NLM_F_DUMP | NLM_F_REQUEST,
.ifm.ifi_family = AF_PACKET,
};
int seq = time(NULL);
req.nlh.nlmsg_seq = seq;
if (send(sock, &req, req.nlh.nlmsg_len, 0) < 0)
return -errno;
return netlink_recv(sock, nl_pid, seq, __dump_link_nlmsg,
dump_link_nlmsg, cookie);
}
static int dump_link_nlmsg(void *cookie, void *msg, struct nlattr **tb)
{
struct bpf_netdev_t *netinfo = cookie;
struct ifinfomsg *ifinfo = msg;
if (netinfo->filter_idx > 0 && netinfo->filter_idx != ifinfo->ifi_index)
return 0;
if (netinfo->used_len == netinfo->array_len) {
struct ip_devname_ifindex *expand;
/* use a temporary so the old array is not leaked if realloc() fails */
expand = realloc(netinfo->devices,
(netinfo->array_len + 16) *
sizeof(struct ip_devname_ifindex));
if (!expand)
return -ENOMEM;
netinfo->devices = expand;
netinfo->array_len += 16;
}
netinfo->devices[netinfo->used_len].ifindex = ifinfo->ifi_index;
snprintf(netinfo->devices[netinfo->used_len].devname,
sizeof(netinfo->devices[netinfo->used_len].devname),
"%s",
tb[IFLA_IFNAME]
? libbpf_nla_getattr_str(tb[IFLA_IFNAME])
: "");
netinfo->used_len++;
return do_xdp_dump(ifinfo, tb);
}
static int dump_class_qdisc_nlmsg(void *cookie, void *msg, struct nlattr **tb)
{
struct bpf_tcinfo_t *tcinfo = cookie;
struct tcmsg *info = msg;
if (tcinfo->is_qdisc) {
/* skip clsact qdisc */
if (tb[TCA_KIND] &&
strcmp(libbpf_nla_data(tb[TCA_KIND]), "clsact") == 0)
return 0;
if (info->tcm_handle == 0)
return 0;
}
if (tcinfo->used_len == tcinfo->array_len) {
struct tc_kind_handle *expand;
/* use a temporary so the old array is not leaked if realloc() fails */
expand = realloc(tcinfo->handle_array,
(tcinfo->array_len + 16) * sizeof(struct tc_kind_handle));
if (!expand)
return -ENOMEM;
tcinfo->handle_array = expand;
tcinfo->array_len += 16;
}
tcinfo->handle_array[tcinfo->used_len].handle = info->tcm_handle;
snprintf(tcinfo->handle_array[tcinfo->used_len].kind,
sizeof(tcinfo->handle_array[tcinfo->used_len].kind),
"%s",
tb[TCA_KIND]
? libbpf_nla_getattr_str(tb[TCA_KIND])
: "unknown");
tcinfo->used_len++;
return 0;
}
static int dump_filter_nlmsg(void *cookie, void *msg, struct nlattr **tb)
{
const struct bpf_filter_t *filter_info = cookie;
return do_filter_dump((struct tcmsg *)msg, tb, filter_info->kind,
filter_info->devname, filter_info->ifindex);
}
static int __show_dev_tc_bpf_name(__u32 id, char *name, size_t len)
{
struct bpf_prog_info info = {};
__u32 ilen = sizeof(info);
int fd, ret;
fd = bpf_prog_get_fd_by_id(id);
if (fd < 0)
return fd;
ret = bpf_obj_get_info_by_fd(fd, &info, &ilen);
if (ret < 0)
goto out;
ret = -ENOENT;
if (info.name[0]) {
get_prog_full_name(&info, fd, name, len);
ret = 0;
}
out:
close(fd);
return ret;
}
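/* Dump modern tcx attachments on one device: query up to 64 programs
 * at the given attach location with bpf_prog_query_opts() and print
 * each program's id, name, attach flags and, for link-based
 * attachments, the link id and flags.
 */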
static void __show_dev_tc_bpf(const struct ip_devname_ifindex *dev,
const enum bpf_attach_type loc)
{
__u32 prog_flags[64] = {}, link_flags[64] = {}, i, j;
__u32 prog_ids[64] = {}, link_ids[64] = {};
LIBBPF_OPTS(bpf_prog_query_opts, optq);
char prog_name[MAX_PROG_FULL_NAME];
int ret;
optq.prog_ids = prog_ids;
optq.prog_attach_flags = prog_flags;
optq.link_ids = link_ids;
optq.link_attach_flags = link_flags;
optq.count = ARRAY_SIZE(prog_ids);
ret = bpf_prog_query_opts(dev->ifindex, loc, &optq);
if (ret)
return;
for (i = 0; i < optq.count; i++) {
NET_START_OBJECT;
NET_DUMP_STR("devname", "%s", dev->devname);
NET_DUMP_UINT("ifindex", "(%u)", dev->ifindex);
NET_DUMP_STR("kind", " %s", attach_loc_strings[loc]);
ret = __show_dev_tc_bpf_name(prog_ids[i], prog_name,
sizeof(prog_name));
if (!ret)
NET_DUMP_STR("name", " %s", prog_name);
NET_DUMP_UINT("prog_id", " prog_id %u ", prog_ids[i]);
if (prog_flags[i] || json_output) {
NET_START_ARRAY("prog_flags", "%s ");
for (j = 0; prog_flags[i] && j < 32; j++) {
if (!(prog_flags[i] & (1 << j)))
continue;
NET_DUMP_UINT_ONLY(1 << j);
}
NET_END_ARRAY("");
}
if (link_ids[i] || json_output) {
NET_DUMP_UINT("link_id", "link_id %u ", link_ids[i]);
if (link_flags[i] || json_output) {
NET_START_ARRAY("link_flags", "%s ");
for (j = 0; link_flags[i] && j < 32; j++) {
if (!(link_flags[i] & (1 << j)))
continue;
NET_DUMP_UINT_ONLY(1 << j);
}
NET_END_ARRAY("");
}
}
NET_END_OBJECT_FINAL;
}
}
static void show_dev_tc_bpf(struct ip_devname_ifindex *dev)
{
__show_dev_tc_bpf(dev, BPF_TCX_INGRESS);
__show_dev_tc_bpf(dev, BPF_TCX_EGRESS);
}
static int show_dev_tc_bpf_classic(int sock, unsigned int nl_pid,
struct ip_devname_ifindex *dev)
{
struct bpf_filter_t filter_info;
struct bpf_tcinfo_t tcinfo;
int i, handle, ret = 0;
tcinfo.handle_array = NULL;
tcinfo.used_len = 0;
tcinfo.array_len = 0;
tcinfo.is_qdisc = false;
ret = netlink_get_class(sock, nl_pid, dev->ifindex,
dump_class_qdisc_nlmsg, &tcinfo);
if (ret)
goto out;
tcinfo.is_qdisc = true;
ret = netlink_get_qdisc(sock, nl_pid, dev->ifindex,
dump_class_qdisc_nlmsg, &tcinfo);
if (ret)
goto out;
filter_info.devname = dev->devname;
filter_info.ifindex = dev->ifindex;
for (i = 0; i < tcinfo.used_len; i++) {
filter_info.kind = tcinfo.handle_array[i].kind;
ret = netlink_get_filter(sock, nl_pid, dev->ifindex,
tcinfo.handle_array[i].handle,
dump_filter_nlmsg, &filter_info);
if (ret)
goto out;
}
/* root, ingress and egress handle */
handle = TC_H_ROOT;
filter_info.kind = "root";
ret = netlink_get_filter(sock, nl_pid, dev->ifindex, handle,
dump_filter_nlmsg, &filter_info);
if (ret)
goto out;
handle = TC_H_MAKE(TC_H_CLSACT, TC_H_MIN_INGRESS);
filter_info.kind = "clsact/ingress";
ret = netlink_get_filter(sock, nl_pid, dev->ifindex, handle,
dump_filter_nlmsg, &filter_info);
if (ret)
goto out;
handle = TC_H_MAKE(TC_H_CLSACT, TC_H_MIN_EGRESS);
filter_info.kind = "clsact/egress";
ret = netlink_get_filter(sock, nl_pid, dev->ifindex, handle,
dump_filter_nlmsg, &filter_info);
if (ret)
goto out;
out:
free(tcinfo.handle_array);
return ret;
}
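/* Query the flow dissector program attached to the current network
 * namespace by passing an fd for /proc/self/ns/net to bpf_prog_query().
 */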
static int query_flow_dissector(struct bpf_attach_info *attach_info)
{
__u32 attach_flags;
__u32 prog_ids[1];
__u32 prog_cnt;
int err;
int fd;
fd = open("/proc/self/ns/net", O_RDONLY);
if (fd < 0) {
p_err("can't open /proc/self/ns/net: %s",
strerror(errno));
return -1;
}
prog_cnt = ARRAY_SIZE(prog_ids);
err = bpf_prog_query(fd, BPF_FLOW_DISSECTOR, 0,
&attach_flags, prog_ids, &prog_cnt);
close(fd);
if (err) {
if (errno == EINVAL) {
/* Older kernels don't support querying
 * flow dissector programs.
 */
errno = 0;
return 0;
}
p_err("can't query prog: %s", strerror(errno));
return -1;
}
if (prog_cnt == 1)
attach_info->flow_dissector_id = prog_ids[0];
return 0;
}
static int net_parse_dev(int *argc, char ***argv)
{
int ifindex;
if (is_prefix(**argv, "dev")) {
NEXT_ARGP();
ifindex = if_nametoindex(**argv);
if (!ifindex)
p_err("invalid devname %s", **argv);
NEXT_ARGP();
} else {
p_err("expected 'dev', got: '%s'?", **argv);
return -1;
}
return ifindex;
}
static int do_attach_detach_xdp(int progfd, enum net_attach_type attach_type,
int ifindex, bool overwrite)
{
__u32 flags = 0;
if (!overwrite)
flags = XDP_FLAGS_UPDATE_IF_NOEXIST;
if (attach_type == NET_ATTACH_TYPE_XDP_GENERIC)
flags |= XDP_FLAGS_SKB_MODE;
if (attach_type == NET_ATTACH_TYPE_XDP_DRIVER)
flags |= XDP_FLAGS_DRV_MODE;
if (attach_type == NET_ATTACH_TYPE_XDP_OFFLOAD)
flags |= XDP_FLAGS_HW_MODE;
return bpf_xdp_attach(ifindex, progfd, flags, NULL);
}
static int do_attach(int argc, char **argv)
{
enum net_attach_type attach_type;
int progfd, ifindex, err = 0;
bool overwrite = false;
/* parse attach args */
if (!REQ_ARGS(5))
return -EINVAL;
attach_type = parse_attach_type(*argv);
if (attach_type == net_attach_type_size) {
p_err("invalid net attach/detach type: %s", *argv);
return -EINVAL;
}
NEXT_ARG();
progfd = prog_parse_fd(&argc, &argv);
if (progfd < 0)
return -EINVAL;
ifindex = net_parse_dev(&argc, &argv);
if (ifindex < 1) {
err = -EINVAL;
goto cleanup;
}
if (argc) {
if (is_prefix(*argv, "overwrite")) {
overwrite = true;
} else {
p_err("expected 'overwrite', got: '%s'?", *argv);
err = -EINVAL;
goto cleanup;
}
}
/* attach xdp prog */
if (is_prefix("xdp", attach_type_strings[attach_type]))
err = do_attach_detach_xdp(progfd, attach_type, ifindex,
overwrite);
if (err) {
p_err("interface %s attach failed: %s",
attach_type_strings[attach_type], strerror(-err));
goto cleanup;
}
if (json_output)
jsonw_null(json_wtr);
cleanup:
close(progfd);
return err;
}
static int do_detach(int argc, char **argv)
{
enum net_attach_type attach_type;
int progfd, ifindex, err = 0;
/* parse detach args */
if (!REQ_ARGS(3))
return -EINVAL;
attach_type = parse_attach_type(*argv);
if (attach_type == net_attach_type_size) {
p_err("invalid net attach/detach type: %s", *argv);
return -EINVAL;
}
NEXT_ARG();
ifindex = net_parse_dev(&argc, &argv);
if (ifindex < 1)
return -EINVAL;
/* detach xdp prog */
progfd = -1;
if (is_prefix("xdp", attach_type_strings[attach_type]))
err = do_attach_detach_xdp(progfd, attach_type, ifindex, false);
if (err < 0) {
p_err("interface %s detach failed: %s",
attach_type_strings[attach_type], strerror(-err));
return err;
}
if (json_output)
jsonw_null(json_wtr);
return 0;
}
static int netfilter_link_compar(const void *a, const void *b)
{
const struct bpf_link_info *nfa = a;
const struct bpf_link_info *nfb = b;
int delta;
delta = nfa->netfilter.pf - nfb->netfilter.pf;
if (delta)
return delta;
delta = nfa->netfilter.hooknum - nfb->netfilter.hooknum;
if (delta)
return delta;
if (nfa->netfilter.priority < nfb->netfilter.priority)
return -1;
if (nfa->netfilter.priority > nfb->netfilter.priority)
return 1;
return nfa->netfilter.flags - nfb->netfilter.flags;
}
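/* Walk all BPF links, collect those of type BPF_LINK_TYPE_NETFILTER
 * into an array grown in chunks of 16, then sort by pf, hook, priority
 * and flags so the output order is stable.
 */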
static void show_link_netfilter(void)
{
unsigned int nf_link_len = 0, nf_link_count = 0;
struct bpf_link_info *nf_link_info = NULL;
__u32 id = 0;
while (true) {
struct bpf_link_info info;
int fd, err;
__u32 len;
err = bpf_link_get_next_id(id, &id);
if (err) {
if (errno == ENOENT)
break;
p_err("can't get next link: %s (id %d)", strerror(errno), id);
break;
}
fd = bpf_link_get_fd_by_id(id);
if (fd < 0) {
p_err("can't get link by id (%u): %s", id, strerror(errno));
continue;
}
memset(&info, 0, sizeof(info));
len = sizeof(info);
err = bpf_link_get_info_by_fd(fd, &info, &len);
close(fd);
if (err) {
p_err("can't get link info for fd %d: %s", fd, strerror(errno));
continue;
}
if (info.type != BPF_LINK_TYPE_NETFILTER)
continue;
if (nf_link_count >= nf_link_len) {
static const unsigned int max_link_count = INT_MAX / sizeof(info);
struct bpf_link_info *expand;
if (nf_link_count > max_link_count) {
p_err("cannot handle more than %u links\n", max_link_count);
break;
}
nf_link_len += 16;
expand = realloc(nf_link_info, nf_link_len * sizeof(info));
if (!expand) {
p_err("realloc: %s", strerror(errno));
break;
}
nf_link_info = expand;
}
nf_link_info[nf_link_count] = info;
nf_link_count++;
}
qsort(nf_link_info, nf_link_count, sizeof(*nf_link_info), netfilter_link_compar);
for (id = 0; id < nf_link_count; id++) {
NET_START_OBJECT;
if (json_output)
netfilter_dump_json(&nf_link_info[id], json_wtr);
else
netfilter_dump_plain(&nf_link_info[id]);
NET_DUMP_UINT("id", " prog_id %u", nf_link_info[id].prog_id);
NET_END_OBJECT;
}
free(nf_link_info);
}
static int do_show(int argc, char **argv)
{
struct bpf_attach_info attach_info = {};
int i, sock, ret, filter_idx = -1;
struct bpf_netdev_t dev_array;
unsigned int nl_pid = 0;
char err_buf[256];
if (argc == 2) {
filter_idx = net_parse_dev(&argc, &argv);
if (filter_idx < 1)
return -1;
} else if (argc != 0) {
usage();
}
ret = query_flow_dissector(&attach_info);
if (ret)
return -1;
sock = netlink_open(&nl_pid);
if (sock < 0) {
fprintf(stderr, "failed to open netlink sock\n");
return -1;
}
dev_array.devices = NULL;
dev_array.used_len = 0;
dev_array.array_len = 0;
dev_array.filter_idx = filter_idx;
if (json_output)
jsonw_start_array(json_wtr);
NET_START_OBJECT;
NET_START_ARRAY("xdp", "%s:\n");
ret = netlink_get_link(sock, nl_pid, dump_link_nlmsg, &dev_array);
NET_END_ARRAY("\n");
if (!ret) {
NET_START_ARRAY("tc", "%s:\n");
for (i = 0; i < dev_array.used_len; i++) {
show_dev_tc_bpf(&dev_array.devices[i]);
ret = show_dev_tc_bpf_classic(sock, nl_pid,
&dev_array.devices[i]);
if (ret)
break;
}
NET_END_ARRAY("\n");
}
NET_START_ARRAY("flow_dissector", "%s:\n");
if (attach_info.flow_dissector_id > 0)
NET_DUMP_UINT("id", "id %u", attach_info.flow_dissector_id);
NET_END_ARRAY("\n");
NET_START_ARRAY("netfilter", "%s:\n");
show_link_netfilter();
NET_END_ARRAY("\n");
NET_END_OBJECT;
if (json_output)
jsonw_end_array(json_wtr);
if (ret) {
if (json_output)
jsonw_null(json_wtr);
libbpf_strerror(ret, err_buf, sizeof(err_buf));
fprintf(stderr, "Error: %s\n", err_buf);
}
free(dev_array.devices);
close(sock);
return ret;
}
static int do_help(int argc, char **argv)
{
if (json_output) {
jsonw_null(json_wtr);
return 0;
}
fprintf(stderr,
"Usage: %1$s %2$s { show | list } [dev <devname>]\n"
" %1$s %2$s attach ATTACH_TYPE PROG dev <devname> [ overwrite ]\n"
" %1$s %2$s detach ATTACH_TYPE dev <devname>\n"
" %1$s %2$s help\n"
"\n"
" " HELP_SPEC_PROGRAM "\n"
" ATTACH_TYPE := { xdp | xdpgeneric | xdpdrv | xdpoffload }\n"
" " HELP_SPEC_OPTIONS " }\n"
"\n"
"Note: Only xdp, tcx, tc, flow_dissector and netfilter attachments\n"
" are currently supported.\n"
" For progs attached to cgroups, use \"bpftool cgroup\"\n"
" to dump program attachments. For program types\n"
" sk_{filter,skb,msg,reuseport} and lwt/seg6, please\n"
" consult iproute2.\n"
"",
bin_name, argv[-2]);
return 0;
}
static const struct cmd cmds[] = {
{ "show", do_show },
{ "list", do_show },
{ "attach", do_attach },
{ "detach", do_detach },
{ "help", do_help },
{ 0 }
};
int do_net(int argc, char **argv)
{
return cmd_select(cmds, argc, argv, do_help);
}
| linux-master | tools/bpf/bpftool/net.c |
// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
// Copyright (C) 2018 Facebook
#include <stdlib.h>
#include <string.h>
#include <bpf/libbpf.h>
#include <linux/rtnetlink.h>
#include <linux/tc_act/tc_bpf.h>
#include "bpf/nlattr.h"
#include "main.h"
#include "netlink_dumper.h"
static void xdp_dump_prog_id(struct nlattr **tb, int attr,
const char *mode,
bool new_json_object)
{
if (!tb[attr])
return;
if (new_json_object)
NET_START_OBJECT
NET_DUMP_STR("mode", " %s", mode);
NET_DUMP_UINT("id", " id %u", libbpf_nla_getattr_u32(tb[attr]))
if (new_json_object)
NET_END_OBJECT
}
static int do_xdp_dump_one(struct nlattr *attr, unsigned int ifindex,
const char *name)
{
struct nlattr *tb[IFLA_XDP_MAX + 1];
unsigned char mode;
if (libbpf_nla_parse_nested(tb, IFLA_XDP_MAX, attr, NULL) < 0)
return -1;
if (!tb[IFLA_XDP_ATTACHED])
return 0;
mode = libbpf_nla_getattr_u8(tb[IFLA_XDP_ATTACHED]);
if (mode == XDP_ATTACHED_NONE)
return 0;
NET_START_OBJECT;
if (name)
NET_DUMP_STR("devname", "%s", name);
NET_DUMP_UINT("ifindex", "(%d)", ifindex);
if (mode == XDP_ATTACHED_MULTI) {
if (json_output) {
jsonw_name(json_wtr, "multi_attachments");
jsonw_start_array(json_wtr);
}
xdp_dump_prog_id(tb, IFLA_XDP_SKB_PROG_ID, "generic", true);
xdp_dump_prog_id(tb, IFLA_XDP_DRV_PROG_ID, "driver", true);
xdp_dump_prog_id(tb, IFLA_XDP_HW_PROG_ID, "offload", true);
if (json_output)
jsonw_end_array(json_wtr);
} else if (mode == XDP_ATTACHED_DRV) {
xdp_dump_prog_id(tb, IFLA_XDP_PROG_ID, "driver", false);
} else if (mode == XDP_ATTACHED_SKB) {
xdp_dump_prog_id(tb, IFLA_XDP_PROG_ID, "generic", false);
} else if (mode == XDP_ATTACHED_HW) {
xdp_dump_prog_id(tb, IFLA_XDP_PROG_ID, "offload", false);
}
NET_END_OBJECT_FINAL;
return 0;
}
int do_xdp_dump(struct ifinfomsg *ifinfo, struct nlattr **tb)
{
if (!tb[IFLA_XDP])
return 0;
return do_xdp_dump_one(tb[IFLA_XDP], ifinfo->ifi_index,
libbpf_nla_getattr_str(tb[IFLA_IFNAME]));
}
static int do_bpf_dump_one_act(struct nlattr *attr)
{
struct nlattr *tb[TCA_ACT_BPF_MAX + 1];
if (libbpf_nla_parse_nested(tb, TCA_ACT_BPF_MAX, attr, NULL) < 0)
return -LIBBPF_ERRNO__NLPARSE;
if (!tb[TCA_ACT_BPF_PARMS])
return -LIBBPF_ERRNO__NLPARSE;
NET_START_OBJECT_NESTED2;
if (tb[TCA_ACT_BPF_NAME])
NET_DUMP_STR("name", "%s",
libbpf_nla_getattr_str(tb[TCA_ACT_BPF_NAME]));
if (tb[TCA_ACT_BPF_ID])
NET_DUMP_UINT("id", " id %u",
libbpf_nla_getattr_u32(tb[TCA_ACT_BPF_ID]));
NET_END_OBJECT_NESTED;
return 0;
}
static int do_dump_one_act(struct nlattr *attr)
{
struct nlattr *tb[TCA_ACT_MAX + 1];
if (!attr)
return 0;
if (libbpf_nla_parse_nested(tb, TCA_ACT_MAX, attr, NULL) < 0)
return -LIBBPF_ERRNO__NLPARSE;
if (tb[TCA_ACT_KIND] &&
strcmp(libbpf_nla_data(tb[TCA_ACT_KIND]), "bpf") == 0)
return do_bpf_dump_one_act(tb[TCA_ACT_OPTIONS]);
return 0;
}
static int do_bpf_act_dump(struct nlattr *attr)
{
struct nlattr *tb[TCA_ACT_MAX_PRIO + 1];
int act, ret;
if (libbpf_nla_parse_nested(tb, TCA_ACT_MAX_PRIO, attr, NULL) < 0)
return -LIBBPF_ERRNO__NLPARSE;
NET_START_ARRAY("act", " %s [");
for (act = 0; act <= TCA_ACT_MAX_PRIO; act++) {
ret = do_dump_one_act(tb[act]);
if (ret)
break;
}
NET_END_ARRAY("] ");
return ret;
}
static int do_bpf_filter_dump(struct nlattr *attr)
{
struct nlattr *tb[TCA_BPF_MAX + 1];
int ret;
if (libbpf_nla_parse_nested(tb, TCA_BPF_MAX, attr, NULL) < 0)
return -LIBBPF_ERRNO__NLPARSE;
if (tb[TCA_BPF_NAME])
NET_DUMP_STR("name", " %s",
libbpf_nla_getattr_str(tb[TCA_BPF_NAME]));
if (tb[TCA_BPF_ID])
NET_DUMP_UINT("id", " id %u",
libbpf_nla_getattr_u32(tb[TCA_BPF_ID]));
if (tb[TCA_BPF_ACT]) {
ret = do_bpf_act_dump(tb[TCA_BPF_ACT]);
if (ret)
return ret;
}
return 0;
}
int do_filter_dump(struct tcmsg *info, struct nlattr **tb, const char *kind,
const char *devname, int ifindex)
{
int ret = 0;
if (tb[TCA_OPTIONS] &&
strcmp(libbpf_nla_data(tb[TCA_KIND]), "bpf") == 0) {
NET_START_OBJECT;
if (devname[0] != '\0')
NET_DUMP_STR("devname", "%s", devname);
NET_DUMP_UINT("ifindex", "(%u)", ifindex);
NET_DUMP_STR("kind", " %s", kind);
ret = do_bpf_filter_dump(tb[TCA_OPTIONS]);
NET_END_OBJECT_FINAL;
}
return ret;
}
| linux-master | tools/bpf/bpftool/netlink_dumper.c |
// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
/* Copyright (C) 2019 Facebook */
#include <errno.h>
#include <fcntl.h>
#include <linux/err.h>
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <linux/btf.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <bpf/bpf.h>
#include <bpf/btf.h>
#include <bpf/hashmap.h>
#include <bpf/libbpf.h>
#include "json_writer.h"
#include "main.h"
static const char * const btf_kind_str[NR_BTF_KINDS] = {
[BTF_KIND_UNKN] = "UNKNOWN",
[BTF_KIND_INT] = "INT",
[BTF_KIND_PTR] = "PTR",
[BTF_KIND_ARRAY] = "ARRAY",
[BTF_KIND_STRUCT] = "STRUCT",
[BTF_KIND_UNION] = "UNION",
[BTF_KIND_ENUM] = "ENUM",
[BTF_KIND_FWD] = "FWD",
[BTF_KIND_TYPEDEF] = "TYPEDEF",
[BTF_KIND_VOLATILE] = "VOLATILE",
[BTF_KIND_CONST] = "CONST",
[BTF_KIND_RESTRICT] = "RESTRICT",
[BTF_KIND_FUNC] = "FUNC",
[BTF_KIND_FUNC_PROTO] = "FUNC_PROTO",
[BTF_KIND_VAR] = "VAR",
[BTF_KIND_DATASEC] = "DATASEC",
[BTF_KIND_FLOAT] = "FLOAT",
[BTF_KIND_DECL_TAG] = "DECL_TAG",
[BTF_KIND_TYPE_TAG] = "TYPE_TAG",
[BTF_KIND_ENUM64] = "ENUM64",
};
static const char *btf_int_enc_str(__u8 encoding)
{
switch (encoding) {
case 0:
return "(none)";
case BTF_INT_SIGNED:
return "SIGNED";
case BTF_INT_CHAR:
return "CHAR";
case BTF_INT_BOOL:
return "BOOL";
default:
return "UNKN";
}
}
static const char *btf_var_linkage_str(__u32 linkage)
{
switch (linkage) {
case BTF_VAR_STATIC:
return "static";
case BTF_VAR_GLOBAL_ALLOCATED:
return "global";
case BTF_VAR_GLOBAL_EXTERN:
return "extern";
default:
return "(unknown)";
}
}
static const char *btf_func_linkage_str(const struct btf_type *t)
{
switch (btf_vlen(t)) {
case BTF_FUNC_STATIC:
return "static";
case BTF_FUNC_GLOBAL:
return "global";
case BTF_FUNC_EXTERN:
return "extern";
default:
return "(unknown)";
}
}
static const char *btf_str(const struct btf *btf, __u32 off)
{
if (!off)
return "(anon)";
return btf__name_by_offset(btf, off) ? : "(invalid)";
}
static int btf_kind_safe(int kind)
{
return kind <= BTF_KIND_MAX ? kind : BTF_KIND_UNKN;
}
static int dump_btf_type(const struct btf *btf, __u32 id,
const struct btf_type *t)
{
json_writer_t *w = json_wtr;
int kind = btf_kind(t);
if (json_output) {
jsonw_start_object(w);
jsonw_uint_field(w, "id", id);
jsonw_string_field(w, "kind", btf_kind_str[btf_kind_safe(kind)]);
jsonw_string_field(w, "name", btf_str(btf, t->name_off));
} else {
printf("[%u] %s '%s'", id, btf_kind_str[btf_kind_safe(kind)],
btf_str(btf, t->name_off));
}
switch (kind) {
case BTF_KIND_INT: {
__u32 v = *(__u32 *)(t + 1);
const char *enc;
enc = btf_int_enc_str(BTF_INT_ENCODING(v));
if (json_output) {
jsonw_uint_field(w, "size", t->size);
jsonw_uint_field(w, "bits_offset", BTF_INT_OFFSET(v));
jsonw_uint_field(w, "nr_bits", BTF_INT_BITS(v));
jsonw_string_field(w, "encoding", enc);
} else {
printf(" size=%u bits_offset=%u nr_bits=%u encoding=%s",
t->size, BTF_INT_OFFSET(v), BTF_INT_BITS(v),
enc);
}
break;
}
case BTF_KIND_PTR:
case BTF_KIND_CONST:
case BTF_KIND_VOLATILE:
case BTF_KIND_RESTRICT:
case BTF_KIND_TYPEDEF:
case BTF_KIND_TYPE_TAG:
if (json_output)
jsonw_uint_field(w, "type_id", t->type);
else
printf(" type_id=%u", t->type);
break;
case BTF_KIND_ARRAY: {
const struct btf_array *arr = (const void *)(t + 1);
if (json_output) {
jsonw_uint_field(w, "type_id", arr->type);
jsonw_uint_field(w, "index_type_id", arr->index_type);
jsonw_uint_field(w, "nr_elems", arr->nelems);
} else {
printf(" type_id=%u index_type_id=%u nr_elems=%u",
arr->type, arr->index_type, arr->nelems);
}
break;
}
case BTF_KIND_STRUCT:
case BTF_KIND_UNION: {
const struct btf_member *m = (const void *)(t + 1);
__u16 vlen = BTF_INFO_VLEN(t->info);
int i;
if (json_output) {
jsonw_uint_field(w, "size", t->size);
jsonw_uint_field(w, "vlen", vlen);
jsonw_name(w, "members");
jsonw_start_array(w);
} else {
printf(" size=%u vlen=%u", t->size, vlen);
}
for (i = 0; i < vlen; i++, m++) {
const char *name = btf_str(btf, m->name_off);
__u32 bit_off, bit_sz;
if (BTF_INFO_KFLAG(t->info)) {
bit_off = BTF_MEMBER_BIT_OFFSET(m->offset);
bit_sz = BTF_MEMBER_BITFIELD_SIZE(m->offset);
} else {
bit_off = m->offset;
bit_sz = 0;
}
if (json_output) {
jsonw_start_object(w);
jsonw_string_field(w, "name", name);
jsonw_uint_field(w, "type_id", m->type);
jsonw_uint_field(w, "bits_offset", bit_off);
if (bit_sz) {
jsonw_uint_field(w, "bitfield_size",
bit_sz);
}
jsonw_end_object(w);
} else {
printf("\n\t'%s' type_id=%u bits_offset=%u",
name, m->type, bit_off);
if (bit_sz)
printf(" bitfield_size=%u", bit_sz);
}
}
if (json_output)
jsonw_end_array(w);
break;
}
case BTF_KIND_ENUM: {
const struct btf_enum *v = (const void *)(t + 1);
__u16 vlen = BTF_INFO_VLEN(t->info);
const char *encoding;
int i;
encoding = btf_kflag(t) ? "SIGNED" : "UNSIGNED";
if (json_output) {
jsonw_string_field(w, "encoding", encoding);
jsonw_uint_field(w, "size", t->size);
jsonw_uint_field(w, "vlen", vlen);
jsonw_name(w, "values");
jsonw_start_array(w);
} else {
printf(" encoding=%s size=%u vlen=%u", encoding, t->size, vlen);
}
for (i = 0; i < vlen; i++, v++) {
const char *name = btf_str(btf, v->name_off);
if (json_output) {
jsonw_start_object(w);
jsonw_string_field(w, "name", name);
if (btf_kflag(t))
jsonw_int_field(w, "val", v->val);
else
jsonw_uint_field(w, "val", v->val);
jsonw_end_object(w);
} else {
if (btf_kflag(t))
printf("\n\t'%s' val=%d", name, v->val);
else
printf("\n\t'%s' val=%u", name, v->val);
}
}
if (json_output)
jsonw_end_array(w);
break;
}
case BTF_KIND_ENUM64: {
const struct btf_enum64 *v = btf_enum64(t);
__u16 vlen = btf_vlen(t);
const char *encoding;
int i;
encoding = btf_kflag(t) ? "SIGNED" : "UNSIGNED";
if (json_output) {
jsonw_string_field(w, "encoding", encoding);
jsonw_uint_field(w, "size", t->size);
jsonw_uint_field(w, "vlen", vlen);
jsonw_name(w, "values");
jsonw_start_array(w);
} else {
printf(" encoding=%s size=%u vlen=%u", encoding, t->size, vlen);
}
for (i = 0; i < vlen; i++, v++) {
const char *name = btf_str(btf, v->name_off);
__u64 val = ((__u64)v->val_hi32 << 32) | v->val_lo32;
if (json_output) {
jsonw_start_object(w);
jsonw_string_field(w, "name", name);
if (btf_kflag(t))
jsonw_int_field(w, "val", val);
else
jsonw_uint_field(w, "val", val);
jsonw_end_object(w);
} else {
if (btf_kflag(t))
printf("\n\t'%s' val=%lldLL", name,
(long long)val);
else
printf("\n\t'%s' val=%lluULL", name,
(unsigned long long)val);
}
}
if (json_output)
jsonw_end_array(w);
break;
}
case BTF_KIND_FWD: {
const char *fwd_kind = BTF_INFO_KFLAG(t->info) ? "union"
: "struct";
if (json_output)
jsonw_string_field(w, "fwd_kind", fwd_kind);
else
printf(" fwd_kind=%s", fwd_kind);
break;
}
case BTF_KIND_FUNC: {
const char *linkage = btf_func_linkage_str(t);
if (json_output) {
jsonw_uint_field(w, "type_id", t->type);
jsonw_string_field(w, "linkage", linkage);
} else {
printf(" type_id=%u linkage=%s", t->type, linkage);
}
break;
}
case BTF_KIND_FUNC_PROTO: {
const struct btf_param *p = (const void *)(t + 1);
__u16 vlen = BTF_INFO_VLEN(t->info);
int i;
if (json_output) {
jsonw_uint_field(w, "ret_type_id", t->type);
jsonw_uint_field(w, "vlen", vlen);
jsonw_name(w, "params");
jsonw_start_array(w);
} else {
printf(" ret_type_id=%u vlen=%u", t->type, vlen);
}
for (i = 0; i < vlen; i++, p++) {
const char *name = btf_str(btf, p->name_off);
if (json_output) {
jsonw_start_object(w);
jsonw_string_field(w, "name", name);
jsonw_uint_field(w, "type_id", p->type);
jsonw_end_object(w);
} else {
printf("\n\t'%s' type_id=%u", name, p->type);
}
}
if (json_output)
jsonw_end_array(w);
break;
}
case BTF_KIND_VAR: {
const struct btf_var *v = (const void *)(t + 1);
const char *linkage;
linkage = btf_var_linkage_str(v->linkage);
if (json_output) {
jsonw_uint_field(w, "type_id", t->type);
jsonw_string_field(w, "linkage", linkage);
} else {
printf(" type_id=%u, linkage=%s", t->type, linkage);
}
break;
}
case BTF_KIND_DATASEC: {
const struct btf_var_secinfo *v = (const void *)(t + 1);
const struct btf_type *vt;
__u16 vlen = BTF_INFO_VLEN(t->info);
int i;
if (json_output) {
jsonw_uint_field(w, "size", t->size);
jsonw_uint_field(w, "vlen", vlen);
jsonw_name(w, "vars");
jsonw_start_array(w);
} else {
printf(" size=%u vlen=%u", t->size, vlen);
}
for (i = 0; i < vlen; i++, v++) {
if (json_output) {
jsonw_start_object(w);
jsonw_uint_field(w, "type_id", v->type);
jsonw_uint_field(w, "offset", v->offset);
jsonw_uint_field(w, "size", v->size);
jsonw_end_object(w);
} else {
printf("\n\ttype_id=%u offset=%u size=%u",
v->type, v->offset, v->size);
if (v->type < btf__type_cnt(btf)) {
vt = btf__type_by_id(btf, v->type);
printf(" (%s '%s')",
btf_kind_str[btf_kind_safe(btf_kind(vt))],
btf_str(btf, vt->name_off));
}
}
}
if (json_output)
jsonw_end_array(w);
break;
}
case BTF_KIND_FLOAT: {
if (json_output)
jsonw_uint_field(w, "size", t->size);
else
printf(" size=%u", t->size);
break;
}
case BTF_KIND_DECL_TAG: {
const struct btf_decl_tag *tag = (const void *)(t + 1);
if (json_output) {
jsonw_uint_field(w, "type_id", t->type);
jsonw_int_field(w, "component_idx", tag->component_idx);
} else {
printf(" type_id=%u component_idx=%d", t->type, tag->component_idx);
}
break;
}
default:
break;
}
if (json_output)
jsonw_end_object(json_wtr);
else
printf("\n");
return 0;
}
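/* Dump BTF types in raw textual form. Without explicit root type ids,
 * dump everything; for split BTF, start past the base BTF so only the
 * split types are printed.
 */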
static int dump_btf_raw(const struct btf *btf,
__u32 *root_type_ids, int root_type_cnt)
{
const struct btf_type *t;
int i;
if (json_output) {
jsonw_start_object(json_wtr);
jsonw_name(json_wtr, "types");
jsonw_start_array(json_wtr);
}
if (root_type_cnt) {
for (i = 0; i < root_type_cnt; i++) {
t = btf__type_by_id(btf, root_type_ids[i]);
dump_btf_type(btf, root_type_ids[i], t);
}
} else {
const struct btf *base;
int cnt = btf__type_cnt(btf);
int start_id = 1;
base = btf__base_btf(btf);
if (base)
start_id = btf__type_cnt(base);
for (i = start_id; i < cnt; i++) {
t = btf__type_by_id(btf, i);
dump_btf_type(btf, i, t);
}
}
if (json_output) {
jsonw_end_array(json_wtr);
jsonw_end_object(json_wtr);
}
return 0;
}
static void __printf(2, 0) btf_dump_printf(void *ctx,
const char *fmt, va_list args)
{
vfprintf(stdout, fmt, args);
}
static int dump_btf_c(const struct btf *btf,
__u32 *root_type_ids, int root_type_cnt)
{
struct btf_dump *d;
int err = 0, i;
d = btf_dump__new(btf, btf_dump_printf, NULL, NULL);
if (!d)
return -errno;
printf("#ifndef __VMLINUX_H__\n");
printf("#define __VMLINUX_H__\n");
printf("\n");
printf("#ifndef BPF_NO_PRESERVE_ACCESS_INDEX\n");
printf("#pragma clang attribute push (__attribute__((preserve_access_index)), apply_to = record)\n");
printf("#endif\n\n");
if (root_type_cnt) {
for (i = 0; i < root_type_cnt; i++) {
err = btf_dump__dump_type(d, root_type_ids[i]);
if (err)
goto done;
}
} else {
int cnt = btf__type_cnt(btf);
for (i = 1; i < cnt; i++) {
err = btf_dump__dump_type(d, i);
if (err)
goto done;
}
}
printf("#ifndef BPF_NO_PRESERVE_ACCESS_INDEX\n");
printf("#pragma clang attribute pop\n");
printf("#endif\n");
printf("\n");
printf("#endif /* __VMLINUX_H__ */\n");
done:
btf_dump__free(d);
return err;
}
static const char sysfs_vmlinux[] = "/sys/kernel/btf/vmlinux";
static struct btf *get_vmlinux_btf_from_sysfs(void)
{
struct btf *base;
base = btf__parse(sysfs_vmlinux, NULL);
if (!base)
p_err("failed to parse vmlinux BTF at '%s': %d\n",
sysfs_vmlinux, -errno);
return base;
}
#define BTF_NAME_BUFF_LEN 64
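/* A kernel BTF object whose name is not "vmlinux" is a kernel module's
 * split BTF; such objects need vmlinux BTF as their base when dumping.
 */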
static bool btf_is_kernel_module(__u32 btf_id)
{
struct bpf_btf_info btf_info = {};
char btf_name[BTF_NAME_BUFF_LEN];
int btf_fd;
__u32 len;
int err;
btf_fd = bpf_btf_get_fd_by_id(btf_id);
if (btf_fd < 0) {
p_err("can't get BTF object by id (%u): %s", btf_id, strerror(errno));
return false;
}
len = sizeof(btf_info);
btf_info.name = ptr_to_u64(btf_name);
btf_info.name_len = sizeof(btf_name);
err = bpf_btf_get_info_by_fd(btf_fd, &btf_info, &len);
close(btf_fd);
if (err) {
p_err("can't get BTF (ID %u) object info: %s", btf_id, strerror(errno));
return false;
}
return btf_info.kernel_btf && strncmp(btf_name, "vmlinux", sizeof(btf_name)) != 0;
}
static int do_dump(int argc, char **argv)
{
struct btf *btf = NULL, *base = NULL;
__u32 root_type_ids[2];
int root_type_cnt = 0;
bool dump_c = false;
__u32 btf_id = -1;
const char *src;
int fd = -1;
int err = 0;
if (!REQ_ARGS(2)) {
usage();
return -1;
}
src = GET_ARG();
if (is_prefix(src, "map")) {
struct bpf_map_info info = {};
__u32 len = sizeof(info);
if (!REQ_ARGS(2)) {
usage();
return -1;
}
fd = map_parse_fd_and_info(&argc, &argv, &info, &len);
if (fd < 0)
return -1;
btf_id = info.btf_id;
if (argc && is_prefix(*argv, "key")) {
root_type_ids[root_type_cnt++] = info.btf_key_type_id;
NEXT_ARG();
} else if (argc && is_prefix(*argv, "value")) {
root_type_ids[root_type_cnt++] = info.btf_value_type_id;
NEXT_ARG();
} else if (argc && is_prefix(*argv, "all")) {
NEXT_ARG();
} else if (argc && is_prefix(*argv, "kv")) {
root_type_ids[root_type_cnt++] = info.btf_key_type_id;
root_type_ids[root_type_cnt++] = info.btf_value_type_id;
NEXT_ARG();
} else {
root_type_ids[root_type_cnt++] = info.btf_key_type_id;
root_type_ids[root_type_cnt++] = info.btf_value_type_id;
}
} else if (is_prefix(src, "prog")) {
struct bpf_prog_info info = {};
__u32 len = sizeof(info);
if (!REQ_ARGS(2)) {
usage();
return -1;
}
fd = prog_parse_fd(&argc, &argv);
if (fd < 0)
return -1;
err = bpf_prog_get_info_by_fd(fd, &info, &len);
if (err) {
p_err("can't get prog info: %s", strerror(errno));
goto done;
}
btf_id = info.btf_id;
} else if (is_prefix(src, "id")) {
char *endptr;
btf_id = strtoul(*argv, &endptr, 0);
if (*endptr) {
p_err("can't parse %s as ID", *argv);
return -1;
}
NEXT_ARG();
} else if (is_prefix(src, "file")) {
const char sysfs_prefix[] = "/sys/kernel/btf/";
if (!base_btf &&
strncmp(*argv, sysfs_prefix, sizeof(sysfs_prefix) - 1) == 0 &&
strcmp(*argv, sysfs_vmlinux) != 0)
base = get_vmlinux_btf_from_sysfs();
btf = btf__parse_split(*argv, base ?: base_btf);
if (!btf) {
err = -errno;
p_err("failed to load BTF from %s: %s",
*argv, strerror(errno));
goto done;
}
NEXT_ARG();
} else {
err = -1;
p_err("unrecognized BTF source specifier: '%s'", src);
goto done;
}
while (argc) {
if (is_prefix(*argv, "format")) {
NEXT_ARG();
if (argc < 1) {
p_err("expecting value for 'format' option\n");
err = -EINVAL;
goto done;
}
if (strcmp(*argv, "c") == 0) {
dump_c = true;
} else if (strcmp(*argv, "raw") == 0) {
dump_c = false;
} else {
p_err("unrecognized format specifier: '%s', possible values: raw, c",
*argv);
err = -EINVAL;
goto done;
}
NEXT_ARG();
} else {
p_err("unrecognized option: '%s'", *argv);
err = -EINVAL;
goto done;
}
}
if (!btf) {
if (!base_btf && btf_is_kernel_module(btf_id)) {
p_info("Warning: valid base BTF was not specified with -B option, falling back to standard base BTF (%s)",
sysfs_vmlinux);
base_btf = get_vmlinux_btf_from_sysfs();
}
btf = btf__load_from_kernel_by_id_split(btf_id, base_btf);
if (!btf) {
err = -errno;
p_err("get btf by id (%u): %s", btf_id, strerror(errno));
goto done;
}
}
if (dump_c) {
if (json_output) {
p_err("JSON output for C-syntax dump is not supported");
err = -ENOTSUP;
goto done;
}
err = dump_btf_c(btf, root_type_ids, root_type_cnt);
} else {
err = dump_btf_raw(btf, root_type_ids, root_type_cnt);
}
done:
close(fd);
btf__free(btf);
btf__free(base);
return err;
}
static int btf_parse_fd(int *argc, char ***argv)
{
unsigned int id;
char *endptr;
int fd;
if (!is_prefix(*argv[0], "id")) {
p_err("expected 'id', got: '%s'?", **argv);
return -1;
}
NEXT_ARGP();
id = strtoul(**argv, &endptr, 0);
if (*endptr) {
p_err("can't parse %s as ID", **argv);
return -1;
}
NEXT_ARGP();
fd = bpf_btf_get_fd_by_id(id);
if (fd < 0)
p_err("can't get BTF object by id (%u): %s",
id, strerror(errno));
return fd;
}
static int
build_btf_type_table(struct hashmap *tab, enum bpf_obj_type type,
void *info, __u32 *len)
{
static const char * const names[] = {
[BPF_OBJ_UNKNOWN] = "unknown",
[BPF_OBJ_PROG] = "prog",
[BPF_OBJ_MAP] = "map",
};
__u32 btf_id, id = 0;
int err;
int fd;
while (true) {
switch (type) {
case BPF_OBJ_PROG:
err = bpf_prog_get_next_id(id, &id);
break;
case BPF_OBJ_MAP:
err = bpf_map_get_next_id(id, &id);
break;
default:
err = -1;
p_err("unexpected object type: %d", type);
goto err_free;
}
if (err) {
if (errno == ENOENT) {
err = 0;
break;
}
p_err("can't get next %s: %s%s", names[type],
strerror(errno),
errno == EINVAL ? " -- kernel too old?" : "");
goto err_free;
}
switch (type) {
case BPF_OBJ_PROG:
fd = bpf_prog_get_fd_by_id(id);
break;
case BPF_OBJ_MAP:
fd = bpf_map_get_fd_by_id(id);
break;
default:
err = -1;
p_err("unexpected object type: %d", type);
goto err_free;
}
if (fd < 0) {
if (errno == ENOENT)
continue;
p_err("can't get %s by id (%u): %s", names[type], id,
strerror(errno));
err = -1;
goto err_free;
}
memset(info, 0, *len);
if (type == BPF_OBJ_PROG)
err = bpf_prog_get_info_by_fd(fd, info, len);
else
err = bpf_map_get_info_by_fd(fd, info, len);
close(fd);
if (err) {
p_err("can't get %s info: %s", names[type],
strerror(errno));
goto err_free;
}
switch (type) {
case BPF_OBJ_PROG:
btf_id = ((struct bpf_prog_info *)info)->btf_id;
break;
case BPF_OBJ_MAP:
btf_id = ((struct bpf_map_info *)info)->btf_id;
break;
default:
err = -1;
p_err("unexpected object type: %d", type);
goto err_free;
}
if (!btf_id)
continue;
err = hashmap__append(tab, btf_id, id);
if (err) {
p_err("failed to append entry to hashmap for BTF ID %u, object ID %u: %s",
btf_id, id, strerror(-err));
goto err_free;
}
}
return 0;
err_free:
hashmap__free(tab);
return err;
}
static int
build_btf_tables(struct hashmap *btf_prog_table,
struct hashmap *btf_map_table)
{
struct bpf_prog_info prog_info;
__u32 prog_len = sizeof(prog_info);
struct bpf_map_info map_info;
__u32 map_len = sizeof(map_info);
int err = 0;
err = build_btf_type_table(btf_prog_table, BPF_OBJ_PROG, &prog_info,
&prog_len);
if (err)
return err;
err = build_btf_type_table(btf_map_table, BPF_OBJ_MAP, &map_info,
&map_len);
if (err) {
hashmap__free(btf_prog_table);
return err;
}
return 0;
}
static void
show_btf_plain(struct bpf_btf_info *info, int fd,
struct hashmap *btf_prog_table,
struct hashmap *btf_map_table)
{
struct hashmap_entry *entry;
const char *name = u64_to_ptr(info->name);
int n;
printf("%u: ", info->id);
if (info->kernel_btf)
printf("name [%s] ", name);
else if (name && name[0])
printf("name %s ", name);
else
printf("name <anon> ");
printf("size %uB", info->btf_size);
n = 0;
hashmap__for_each_key_entry(btf_prog_table, entry, info->id) {
printf("%s%lu", n++ == 0 ? " prog_ids " : ",", entry->value);
}
n = 0;
hashmap__for_each_key_entry(btf_map_table, entry, info->id) {
printf("%s%lu", n++ == 0 ? " map_ids " : ",", entry->value);
}
emit_obj_refs_plain(refs_table, info->id, "\n\tpids ");
printf("\n");
}
static void
show_btf_json(struct bpf_btf_info *info, int fd,
struct hashmap *btf_prog_table,
struct hashmap *btf_map_table)
{
struct hashmap_entry *entry;
const char *name = u64_to_ptr(info->name);
jsonw_start_object(json_wtr); /* btf object */
jsonw_uint_field(json_wtr, "id", info->id);
jsonw_uint_field(json_wtr, "size", info->btf_size);
jsonw_name(json_wtr, "prog_ids");
jsonw_start_array(json_wtr); /* prog_ids */
hashmap__for_each_key_entry(btf_prog_table, entry, info->id) {
jsonw_uint(json_wtr, entry->value);
}
jsonw_end_array(json_wtr); /* prog_ids */
jsonw_name(json_wtr, "map_ids");
jsonw_start_array(json_wtr); /* map_ids */
hashmap__for_each_key_entry(btf_map_table, entry, info->id) {
jsonw_uint(json_wtr, entry->value);
}
jsonw_end_array(json_wtr); /* map_ids */
emit_obj_refs_json(refs_table, info->id, json_wtr); /* pids */
jsonw_bool_field(json_wtr, "kernel", info->kernel_btf);
if (name && name[0])
jsonw_string_field(json_wtr, "name", name);
jsonw_end_object(json_wtr); /* btf object */
}
static int
show_btf(int fd, struct hashmap *btf_prog_table,
struct hashmap *btf_map_table)
{
struct bpf_btf_info info;
__u32 len = sizeof(info);
char name[64];
int err;
memset(&info, 0, sizeof(info));
err = bpf_btf_get_info_by_fd(fd, &info, &len);
if (err) {
p_err("can't get BTF object info: %s", strerror(errno));
return -1;
}
/* if the kernel supports emitting the BTF object name, pass a name pointer */
if (info.name_len) {
memset(&info, 0, sizeof(info));
info.name_len = sizeof(name);
info.name = ptr_to_u64(name);
len = sizeof(info);
err = bpf_btf_get_info_by_fd(fd, &info, &len);
if (err) {
p_err("can't get BTF object info: %s", strerror(errno));
return -1;
}
}
if (json_output)
show_btf_json(&info, fd, btf_prog_table, btf_map_table);
else
show_btf_plain(&info, fd, btf_prog_table, btf_map_table);
return 0;
}
static int do_show(int argc, char **argv)
{
struct hashmap *btf_prog_table;
struct hashmap *btf_map_table;
int err, fd = -1;
__u32 id = 0;
if (argc == 2) {
fd = btf_parse_fd(&argc, &argv);
if (fd < 0)
return -1;
}
if (argc) {
if (fd >= 0)
close(fd);
return BAD_ARG();
}
btf_prog_table = hashmap__new(hash_fn_for_key_as_id,
equal_fn_for_key_as_id, NULL);
btf_map_table = hashmap__new(hash_fn_for_key_as_id,
equal_fn_for_key_as_id, NULL);
if (IS_ERR(btf_prog_table) || IS_ERR(btf_map_table)) {
hashmap__free(btf_prog_table);
hashmap__free(btf_map_table);
if (fd >= 0)
close(fd);
p_err("failed to create hashmap for object references");
return -1;
}
err = build_btf_tables(btf_prog_table, btf_map_table);
if (err) {
if (fd >= 0)
close(fd);
return err;
}
build_obj_refs_table(&refs_table, BPF_OBJ_BTF);
if (fd >= 0) {
err = show_btf(fd, btf_prog_table, btf_map_table);
close(fd);
goto exit_free;
}
if (json_output)
jsonw_start_array(json_wtr); /* root array */
while (true) {
err = bpf_btf_get_next_id(id, &id);
if (err) {
if (errno == ENOENT) {
err = 0;
break;
}
p_err("can't get next BTF object: %s%s",
strerror(errno),
errno == EINVAL ? " -- kernel too old?" : "");
err = -1;
break;
}
fd = bpf_btf_get_fd_by_id(id);
if (fd < 0) {
if (errno == ENOENT)
continue;
p_err("can't get BTF object by id (%u): %s",
id, strerror(errno));
err = -1;
break;
}
err = show_btf(fd, btf_prog_table, btf_map_table);
close(fd);
if (err)
break;
}
if (json_output)
jsonw_end_array(json_wtr); /* root array */
exit_free:
hashmap__free(btf_prog_table);
hashmap__free(btf_map_table);
delete_obj_refs_table(refs_table);
return err;
}
static int do_help(int argc, char **argv)
{
if (json_output) {
jsonw_null(json_wtr);
return 0;
}
fprintf(stderr,
"Usage: %1$s %2$s { show | list } [id BTF_ID]\n"
" %1$s %2$s dump BTF_SRC [format FORMAT]\n"
" %1$s %2$s help\n"
"\n"
" BTF_SRC := { id BTF_ID | prog PROG | map MAP [{key | value | kv | all}] | file FILE }\n"
" FORMAT := { raw | c }\n"
" " HELP_SPEC_MAP "\n"
" " HELP_SPEC_PROGRAM "\n"
" " HELP_SPEC_OPTIONS " |\n"
" {-B|--base-btf} }\n"
"",
bin_name, "btf");
return 0;
}
static const struct cmd cmds[] = {
{ "show", do_show },
{ "list", do_show },
{ "help", do_help },
{ "dump", do_dump },
{ 0 }
};
int do_btf(int argc, char **argv)
{
return cmd_select(cmds, argc, argv, do_help);
}
| linux-master | tools/bpf/bpftool/btf.c |
// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
/* Copyright (c) 2018 Facebook */
#include <ctype.h>
#include <stdio.h> /* for (FILE *) used by json_writer */
#include <string.h>
#include <unistd.h>
#include <asm/byteorder.h>
#include <linux/bitops.h>
#include <linux/btf.h>
#include <linux/err.h>
#include <bpf/btf.h>
#include <bpf/bpf.h>
#include "json_writer.h"
#include "main.h"
#define BITS_PER_BYTE_MASK (BITS_PER_BYTE - 1)
#define BITS_PER_BYTE_MASKED(bits) ((bits) & BITS_PER_BYTE_MASK)
#define BITS_ROUNDDOWN_BYTES(bits) ((bits) >> 3)
#define BITS_ROUNDUP_BYTES(bits) \
(BITS_ROUNDDOWN_BYTES(bits) + !!BITS_PER_BYTE_MASKED(bits))
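/* Worked examples (illustrative):
 *   BITS_PER_BYTE_MASKED(12)  == 4  (12 & 7)
 *   BITS_ROUNDDOWN_BYTES(12)  == 1  (12 >> 3)
 *   BITS_ROUNDUP_BYTES(12)    == 2  (1 + !!4)
 */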
static int btf_dumper_do_type(const struct btf_dumper *d, __u32 type_id,
__u8 bit_offset, const void *data);
static int btf_dump_func(const struct btf *btf, char *func_sig,
const struct btf_type *func_proto,
const struct btf_type *func, int pos, int size);
static int dump_prog_id_as_func_ptr(const struct btf_dumper *d,
const struct btf_type *func_proto,
__u32 prog_id)
{
const struct btf_type *func_type;
int prog_fd = -1, func_sig_len;
struct bpf_prog_info info = {};
__u32 info_len = sizeof(info);
const char *prog_name = NULL;
struct btf *prog_btf = NULL;
struct bpf_func_info finfo;
__u32 finfo_rec_size;
char prog_str[1024];
int err;
/* Get the ptr's func_proto */
func_sig_len = btf_dump_func(d->btf, prog_str, func_proto, NULL, 0,
sizeof(prog_str));
if (func_sig_len == -1)
return -1;
if (!prog_id)
goto print;
/* Get the bpf_prog's name from its func_info. */
prog_fd = bpf_prog_get_fd_by_id(prog_id);
if (prog_fd < 0)
goto print;
err = bpf_prog_get_info_by_fd(prog_fd, &info, &info_len);
if (err)
goto print;
if (!info.btf_id || !info.nr_func_info)
goto print;
finfo_rec_size = info.func_info_rec_size;
memset(&info, 0, sizeof(info));
info.nr_func_info = 1;
info.func_info_rec_size = finfo_rec_size;
info.func_info = ptr_to_u64(&finfo);
err = bpf_prog_get_info_by_fd(prog_fd, &info, &info_len);
if (err)
goto print;
prog_btf = btf__load_from_kernel_by_id(info.btf_id);
if (!prog_btf)
goto print;
func_type = btf__type_by_id(prog_btf, finfo.type_id);
if (!func_type || !btf_is_func(func_type))
goto print;
prog_name = btf__name_by_offset(prog_btf, func_type->name_off);
print:
if (!prog_id)
snprintf(&prog_str[func_sig_len],
sizeof(prog_str) - func_sig_len, " 0");
else if (prog_name)
snprintf(&prog_str[func_sig_len],
sizeof(prog_str) - func_sig_len,
" %s/prog_id:%u", prog_name, prog_id);
else
snprintf(&prog_str[func_sig_len],
sizeof(prog_str) - func_sig_len,
" <unknown_prog_name>/prog_id:%u", prog_id);
prog_str[sizeof(prog_str) - 1] = '\0';
jsonw_string(d->jw, prog_str);
btf__free(prog_btf);
if (prog_fd >= 0)
close(prog_fd);
return 0;
}
static void btf_dumper_ptr(const struct btf_dumper *d,
const struct btf_type *t,
const void *data)
{
unsigned long value = *(unsigned long *)data;
const struct btf_type *ptr_type;
__s32 ptr_type_id;
if (!d->prog_id_as_func_ptr || value > UINT32_MAX)
goto print_ptr_value;
ptr_type_id = btf__resolve_type(d->btf, t->type);
if (ptr_type_id < 0)
goto print_ptr_value;
ptr_type = btf__type_by_id(d->btf, ptr_type_id);
if (!ptr_type || !btf_is_func_proto(ptr_type))
goto print_ptr_value;
if (!dump_prog_id_as_func_ptr(d, ptr_type, value))
return;
print_ptr_value:
if (d->is_plain_text)
jsonw_printf(d->jw, "%p", (void *)value);
else
jsonw_printf(d->jw, "%lu", value);
}
static int btf_dumper_modifier(const struct btf_dumper *d, __u32 type_id,
__u8 bit_offset, const void *data)
{
int actual_type_id;
actual_type_id = btf__resolve_type(d->btf, type_id);
if (actual_type_id < 0)
return actual_type_id;
return btf_dumper_do_type(d, actual_type_id, bit_offset, data);
}
static int btf_dumper_enum(const struct btf_dumper *d,
const struct btf_type *t,
const void *data)
{
const struct btf_enum *enums = btf_enum(t);
__s64 value;
__u16 i;
switch (t->size) {
case 8:
value = *(__s64 *)data;
break;
case 4:
value = *(__s32 *)data;
break;
case 2:
value = *(__s16 *)data;
break;
case 1:
value = *(__s8 *)data;
break;
default:
return -EINVAL;
}
for (i = 0; i < btf_vlen(t); i++) {
if (value == enums[i].val) {
jsonw_string(d->jw,
btf__name_by_offset(d->btf,
enums[i].name_off));
return 0;
}
}
jsonw_int(d->jw, value);
return 0;
}
static int btf_dumper_enum64(const struct btf_dumper *d,
const struct btf_type *t,
const void *data)
{
const struct btf_enum64 *enums = btf_enum64(t);
__u32 val_lo32, val_hi32;
__u64 value;
__u16 i;
value = *(__u64 *)data;
val_lo32 = (__u32)value;
val_hi32 = value >> 32;
for (i = 0; i < btf_vlen(t); i++) {
if (val_lo32 == enums[i].val_lo32 && val_hi32 == enums[i].val_hi32) {
jsonw_string(d->jw,
btf__name_by_offset(d->btf,
enums[i].name_off));
return 0;
}
}
jsonw_int(d->jw, value);
return 0;
}
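/* e.g. an enumerator value of 0x100000002 is matched as
 * val_hi32 == 0x1 and val_lo32 == 0x2.
 */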
static bool is_str_array(const struct btf *btf, const struct btf_array *arr,
const char *s)
{
const struct btf_type *elem_type;
const char *end_s;
if (!arr->nelems)
return false;
elem_type = btf__type_by_id(btf, arr->type);
/* We do not skip typedefs here: a typedef to char does not
 * count as a string.
 */
while (elem_type && btf_is_mod(elem_type))
elem_type = btf__type_by_id(btf, elem_type->type);
if (!elem_type || !btf_is_int(elem_type) || elem_type->size != 1)
return false;
if (btf_int_encoding(elem_type) != BTF_INT_CHAR &&
strcmp("char", btf__name_by_offset(btf, elem_type->name_off)))
return false;
end_s = s + arr->nelems;
while (s < end_s) {
if (!*s)
return true;
if (*s <= 0x1f || *s >= 0x7f)
return false;
s++;
}
/* no terminating '\0' was found */
return false;
}
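/* Illustrative: a char[6] holding "hello\0" prints as a string, while
 * a char[5] holding "hello" (no terminator) or any array containing
 * control bytes falls back to an integer array.
 */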
static int btf_dumper_array(const struct btf_dumper *d, __u32 type_id,
const void *data)
{
const struct btf_type *t = btf__type_by_id(d->btf, type_id);
struct btf_array *arr = (struct btf_array *)(t + 1);
long long elem_size;
int ret = 0;
__u32 i;
if (is_str_array(d->btf, arr, data)) {
jsonw_string(d->jw, data);
return 0;
}
elem_size = btf__resolve_size(d->btf, arr->type);
if (elem_size < 0)
return elem_size;
jsonw_start_array(d->jw);
for (i = 0; i < arr->nelems; i++) {
ret = btf_dumper_do_type(d, arr->type, 0,
data + i * elem_size);
if (ret)
break;
}
jsonw_end_array(d->jw);
return ret;
}
static void btf_int128_print(json_writer_t *jw, const void *data,
bool is_plain_text)
{
/* data points to a __int128 number.
 * Suppose
 *     int128_num = *(__int128 *)data;
 * The formulas below show what upper_num and lower_num represent:
 *     upper_num = int128_num >> 64;
 *     lower_num = int128_num & 0xffffffffFFFFFFFFULL;
 */
__u64 upper_num, lower_num;
#ifdef __BIG_ENDIAN_BITFIELD
upper_num = *(__u64 *)data;
lower_num = *(__u64 *)(data + 8);
#else
upper_num = *(__u64 *)(data + 8);
lower_num = *(__u64 *)data;
#endif
if (is_plain_text) {
if (upper_num == 0)
jsonw_printf(jw, "0x%llx", lower_num);
else
jsonw_printf(jw, "0x%llx%016llx", upper_num, lower_num);
} else {
if (upper_num == 0)
jsonw_printf(jw, "\"0x%llx\"", lower_num);
else
jsonw_printf(jw, "\"0x%llx%016llx\"", upper_num, lower_num);
}
}
static void btf_int128_shift(__u64 *print_num, __u16 left_shift_bits,
__u16 right_shift_bits)
{
__u64 upper_num, lower_num;
#ifdef __BIG_ENDIAN_BITFIELD
upper_num = print_num[0];
lower_num = print_num[1];
#else
upper_num = print_num[1];
lower_num = print_num[0];
#endif
/* shake out unneeded bits by shift/or operations */
if (left_shift_bits >= 64) {
upper_num = lower_num << (left_shift_bits - 64);
lower_num = 0;
} else {
upper_num = (upper_num << left_shift_bits) |
(lower_num >> (64 - left_shift_bits));
lower_num = lower_num << left_shift_bits;
}
if (right_shift_bits >= 64) {
lower_num = upper_num >> (right_shift_bits - 64);
upper_num = 0;
} else {
lower_num = (lower_num >> right_shift_bits) |
(upper_num << (64 - right_shift_bits));
upper_num = upper_num >> right_shift_bits;
}
#ifdef __BIG_ENDIAN_BITFIELD
print_num[0] = upper_num;
print_num[1] = lower_num;
#else
print_num[0] = lower_num;
print_num[1] = upper_num;
#endif
}
static void btf_dumper_bitfield(__u32 nr_bits, __u8 bit_offset,
const void *data, json_writer_t *jw,
bool is_plain_text)
{
int left_shift_bits, right_shift_bits;
__u64 print_num[2] = {};
int bytes_to_copy;
int bits_to_copy;
bits_to_copy = bit_offset + nr_bits;
bytes_to_copy = BITS_ROUNDUP_BYTES(bits_to_copy);
memcpy(print_num, data, bytes_to_copy);
#if defined(__BIG_ENDIAN_BITFIELD)
left_shift_bits = bit_offset;
#elif defined(__LITTLE_ENDIAN_BITFIELD)
left_shift_bits = 128 - bits_to_copy;
#else
#error neither big nor little endian
#endif
right_shift_bits = 128 - nr_bits;
btf_int128_shift(print_num, left_shift_bits, right_shift_bits);
btf_int128_print(jw, print_num, is_plain_text);
}
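/* Worked example (little endian, illustrative): for nr_bits = 3 at
 * bit_offset = 5, bits_to_copy = 8, so one byte is copied; the value
 * is shifted left by 128 - 8 = 120 bits and right by 128 - 3 = 125
 * bits, leaving the 3 field bits at the bottom of print_num.
 */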
static void btf_dumper_int_bits(__u32 int_type, __u8 bit_offset,
const void *data, json_writer_t *jw,
bool is_plain_text)
{
int nr_bits = BTF_INT_BITS(int_type);
int total_bits_offset;
/* bit_offset is at most 7.
 * BTF_INT_OFFSET() cannot exceed 128 bits.
 */
total_bits_offset = bit_offset + BTF_INT_OFFSET(int_type);
data += BITS_ROUNDDOWN_BYTES(total_bits_offset);
bit_offset = BITS_PER_BYTE_MASKED(total_bits_offset);
btf_dumper_bitfield(nr_bits, bit_offset, data, jw,
is_plain_text);
}
static int btf_dumper_int(const struct btf_type *t, __u8 bit_offset,
const void *data, json_writer_t *jw,
bool is_plain_text)
{
__u32 *int_type;
__u32 nr_bits;
int_type = (__u32 *)(t + 1);
nr_bits = BTF_INT_BITS(*int_type);
/* if this is a bitfield */
if (bit_offset || BTF_INT_OFFSET(*int_type) ||
BITS_PER_BYTE_MASKED(nr_bits)) {
btf_dumper_int_bits(*int_type, bit_offset, data, jw,
is_plain_text);
return 0;
}
if (nr_bits == 128) {
btf_int128_print(jw, data, is_plain_text);
return 0;
}
switch (BTF_INT_ENCODING(*int_type)) {
case 0:
if (BTF_INT_BITS(*int_type) == 64)
jsonw_printf(jw, "%llu", *(__u64 *)data);
else if (BTF_INT_BITS(*int_type) == 32)
jsonw_printf(jw, "%u", *(__u32 *)data);
else if (BTF_INT_BITS(*int_type) == 16)
jsonw_printf(jw, "%hu", *(__u16 *)data);
else if (BTF_INT_BITS(*int_type) == 8)
jsonw_printf(jw, "%hhu", *(__u8 *)data);
else
btf_dumper_int_bits(*int_type, bit_offset, data, jw,
is_plain_text);
break;
case BTF_INT_SIGNED:
if (BTF_INT_BITS(*int_type) == 64)
jsonw_printf(jw, "%lld", *(long long *)data);
else if (BTF_INT_BITS(*int_type) == 32)
jsonw_printf(jw, "%d", *(int *)data);
else if (BTF_INT_BITS(*int_type) == 16)
jsonw_printf(jw, "%hd", *(short *)data);
else if (BTF_INT_BITS(*int_type) == 8)
jsonw_printf(jw, "%hhd", *(char *)data);
else
btf_dumper_int_bits(*int_type, bit_offset, data, jw,
is_plain_text);
break;
case BTF_INT_CHAR:
if (isprint(*(char *)data))
jsonw_printf(jw, "\"%c\"", *(char *)data);
else
if (is_plain_text)
jsonw_printf(jw, "0x%hhx", *(char *)data);
else
jsonw_printf(jw, "\"\\u00%02hhx\"",
*(char *)data);
break;
case BTF_INT_BOOL:
jsonw_bool(jw, *(bool *)data);
break;
default:
/* shouldn't happen */
return -EINVAL;
}
return 0;
}
static int btf_dumper_struct(const struct btf_dumper *d, __u32 type_id,
const void *data)
{
const struct btf_type *t;
struct btf_member *m;
const void *data_off;
int kind_flag;
int ret = 0;
int i, vlen;
t = btf__type_by_id(d->btf, type_id);
if (!t)
return -EINVAL;
kind_flag = BTF_INFO_KFLAG(t->info);
vlen = BTF_INFO_VLEN(t->info);
jsonw_start_object(d->jw);
m = (struct btf_member *)(t + 1);
for (i = 0; i < vlen; i++) {
__u32 bit_offset = m[i].offset;
__u32 bitfield_size = 0;
if (kind_flag) {
bitfield_size = BTF_MEMBER_BITFIELD_SIZE(bit_offset);
bit_offset = BTF_MEMBER_BIT_OFFSET(bit_offset);
}
jsonw_name(d->jw, btf__name_by_offset(d->btf, m[i].name_off));
data_off = data + BITS_ROUNDDOWN_BYTES(bit_offset);
if (bitfield_size) {
btf_dumper_bitfield(bitfield_size,
BITS_PER_BYTE_MASKED(bit_offset),
data_off, d->jw, d->is_plain_text);
} else {
ret = btf_dumper_do_type(d, m[i].type,
BITS_PER_BYTE_MASKED(bit_offset),
data_off);
if (ret)
break;
}
}
jsonw_end_object(d->jw);
return ret;
}
static int btf_dumper_var(const struct btf_dumper *d, __u32 type_id,
__u8 bit_offset, const void *data)
{
const struct btf_type *t = btf__type_by_id(d->btf, type_id);
int ret;
jsonw_start_object(d->jw);
jsonw_name(d->jw, btf__name_by_offset(d->btf, t->name_off));
ret = btf_dumper_do_type(d, t->type, bit_offset, data);
jsonw_end_object(d->jw);
return ret;
}
static int btf_dumper_datasec(const struct btf_dumper *d, __u32 type_id,
const void *data)
{
struct btf_var_secinfo *vsi;
const struct btf_type *t;
int ret = 0, i, vlen;
t = btf__type_by_id(d->btf, type_id);
if (!t)
return -EINVAL;
vlen = BTF_INFO_VLEN(t->info);
vsi = (struct btf_var_secinfo *)(t + 1);
jsonw_start_object(d->jw);
jsonw_name(d->jw, btf__name_by_offset(d->btf, t->name_off));
jsonw_start_array(d->jw);
for (i = 0; i < vlen; i++) {
ret = btf_dumper_do_type(d, vsi[i].type, 0, data + vsi[i].offset);
if (ret)
break;
}
jsonw_end_array(d->jw);
jsonw_end_object(d->jw);
return ret;
}
static int btf_dumper_do_type(const struct btf_dumper *d, __u32 type_id,
__u8 bit_offset, const void *data)
{
const struct btf_type *t = btf__type_by_id(d->btf, type_id);
switch (BTF_INFO_KIND(t->info)) {
case BTF_KIND_INT:
return btf_dumper_int(t, bit_offset, data, d->jw,
d->is_plain_text);
case BTF_KIND_STRUCT:
case BTF_KIND_UNION:
return btf_dumper_struct(d, type_id, data);
case BTF_KIND_ARRAY:
return btf_dumper_array(d, type_id, data);
case BTF_KIND_ENUM:
return btf_dumper_enum(d, t, data);
case BTF_KIND_ENUM64:
return btf_dumper_enum64(d, t, data);
case BTF_KIND_PTR:
btf_dumper_ptr(d, t, data);
return 0;
case BTF_KIND_UNKN:
jsonw_printf(d->jw, "(unknown)");
return 0;
case BTF_KIND_FWD:
/* a map key or value can't be a forward declaration */
jsonw_printf(d->jw, "(fwd-kind-invalid)");
return -EINVAL;
case BTF_KIND_TYPEDEF:
case BTF_KIND_VOLATILE:
case BTF_KIND_CONST:
case BTF_KIND_RESTRICT:
return btf_dumper_modifier(d, type_id, bit_offset, data);
case BTF_KIND_VAR:
return btf_dumper_var(d, type_id, bit_offset, data);
case BTF_KIND_DATASEC:
return btf_dumper_datasec(d, type_id, data);
default:
jsonw_printf(d->jw, "(unsupported-kind");
return -EINVAL;
}
}
int btf_dumper_type(const struct btf_dumper *d, __u32 type_id,
const void *data)
{
return btf_dumper_do_type(d, type_id, 0, data);
}
#define BTF_PRINT_ARG(...) \
do { \
pos += snprintf(func_sig + pos, size - pos, \
__VA_ARGS__); \
if (pos >= size) \
return -1; \
} while (0)
#define BTF_PRINT_TYPE(type) \
do { \
pos = __btf_dumper_type_only(btf, type, func_sig, \
pos, size); \
if (pos == -1) \
return -1; \
} while (0)
static int __btf_dumper_type_only(const struct btf *btf, __u32 type_id,
char *func_sig, int pos, int size)
{
const struct btf_type *proto_type;
const struct btf_array *array;
const struct btf_var *var;
const struct btf_type *t;
if (!type_id) {
BTF_PRINT_ARG("void ");
return pos;
}
t = btf__type_by_id(btf, type_id);
switch (BTF_INFO_KIND(t->info)) {
case BTF_KIND_INT:
case BTF_KIND_TYPEDEF:
case BTF_KIND_FLOAT:
BTF_PRINT_ARG("%s ", btf__name_by_offset(btf, t->name_off));
break;
case BTF_KIND_STRUCT:
BTF_PRINT_ARG("struct %s ",
btf__name_by_offset(btf, t->name_off));
break;
case BTF_KIND_UNION:
BTF_PRINT_ARG("union %s ",
btf__name_by_offset(btf, t->name_off));
break;
case BTF_KIND_ENUM:
case BTF_KIND_ENUM64:
BTF_PRINT_ARG("enum %s ",
btf__name_by_offset(btf, t->name_off));
break;
case BTF_KIND_ARRAY:
array = (struct btf_array *)(t + 1);
BTF_PRINT_TYPE(array->type);
BTF_PRINT_ARG("[%d]", array->nelems);
break;
case BTF_KIND_PTR:
BTF_PRINT_TYPE(t->type);
BTF_PRINT_ARG("* ");
break;
case BTF_KIND_FWD:
BTF_PRINT_ARG("%s %s ",
BTF_INFO_KFLAG(t->info) ? "union" : "struct",
btf__name_by_offset(btf, t->name_off));
break;
case BTF_KIND_VOLATILE:
BTF_PRINT_ARG("volatile ");
BTF_PRINT_TYPE(t->type);
break;
case BTF_KIND_CONST:
BTF_PRINT_ARG("const ");
BTF_PRINT_TYPE(t->type);
break;
case BTF_KIND_RESTRICT:
BTF_PRINT_ARG("restrict ");
BTF_PRINT_TYPE(t->type);
break;
case BTF_KIND_FUNC_PROTO:
pos = btf_dump_func(btf, func_sig, t, NULL, pos, size);
if (pos == -1)
return -1;
break;
case BTF_KIND_FUNC:
proto_type = btf__type_by_id(btf, t->type);
pos = btf_dump_func(btf, func_sig, proto_type, t, pos, size);
if (pos == -1)
return -1;
break;
case BTF_KIND_VAR:
var = (struct btf_var *)(t + 1);
if (var->linkage == BTF_VAR_STATIC)
BTF_PRINT_ARG("static ");
BTF_PRINT_TYPE(t->type);
BTF_PRINT_ARG(" %s",
btf__name_by_offset(btf, t->name_off));
break;
case BTF_KIND_DATASEC:
BTF_PRINT_ARG("section (\"%s\") ",
btf__name_by_offset(btf, t->name_off));
break;
case BTF_KIND_UNKN:
default:
return -1;
}
return pos;
}
static int btf_dump_func(const struct btf *btf, char *func_sig,
const struct btf_type *func_proto,
const struct btf_type *func, int pos, int size)
{
int i, vlen;
BTF_PRINT_TYPE(func_proto->type);
if (func)
BTF_PRINT_ARG("%s(", btf__name_by_offset(btf, func->name_off));
else
BTF_PRINT_ARG("(");
vlen = BTF_INFO_VLEN(func_proto->info);
for (i = 0; i < vlen; i++) {
struct btf_param *arg = &((struct btf_param *)(func_proto + 1))[i];
if (i)
BTF_PRINT_ARG(", ");
if (arg->type) {
BTF_PRINT_TYPE(arg->type);
if (arg->name_off)
BTF_PRINT_ARG("%s",
btf__name_by_offset(btf, arg->name_off));
else if (pos && func_sig[pos - 1] == ' ')
/* Remove unnecessary space for
* FUNC_PROTO that does not have
* arg->name_off
*/
func_sig[--pos] = '\0';
} else {
BTF_PRINT_ARG("...");
}
}
BTF_PRINT_ARG(")");
return pos;
}
void btf_dumper_type_only(const struct btf *btf, __u32 type_id, char *func_sig,
int size)
{
int err;
func_sig[0] = '\0';
if (!btf)
return;
err = __btf_dumper_type_only(btf, type_id, func_sig, 0, size);
if (err < 0)
func_sig[0] = '\0';
}
static const char *ltrim(const char *s)
{
while (isspace(*s))
s++;
return s;
}
void btf_dump_linfo_plain(const struct btf *btf,
const struct bpf_line_info *linfo,
const char *prefix, bool linum)
{
const char *line = btf__name_by_offset(btf, linfo->line_off);
if (!line)
return;
line = ltrim(line);
if (!prefix)
prefix = "";
if (linum) {
const char *file = btf__name_by_offset(btf, linfo->file_name_off);
/* Be more forgiving about the file name: the linum option
 * is expected to provide more info than the already
 * available source line.
 */
if (!file)
file = "";
printf("%s%s [file:%s line_num:%u line_col:%u]\n",
prefix, line, file,
BPF_LINE_INFO_LINE_NUM(linfo->line_col),
BPF_LINE_INFO_LINE_COL(linfo->line_col));
} else {
printf("%s%s\n", prefix, line);
}
}
void btf_dump_linfo_json(const struct btf *btf,
const struct bpf_line_info *linfo, bool linum)
{
const char *line = btf__name_by_offset(btf, linfo->line_off);
if (line)
jsonw_string_field(json_wtr, "src", ltrim(line));
if (linum) {
const char *file = btf__name_by_offset(btf, linfo->file_name_off);
if (file)
jsonw_string_field(json_wtr, "file", file);
if (BPF_LINE_INFO_LINE_NUM(linfo->line_col))
jsonw_int_field(json_wtr, "line_num",
BPF_LINE_INFO_LINE_NUM(linfo->line_col));
if (BPF_LINE_INFO_LINE_COL(linfo->line_col))
jsonw_int_field(json_wtr, "line_col",
BPF_LINE_INFO_LINE_COL(linfo->line_col));
}
}
static void dotlabel_puts(const char *s)
{
for (; *s; ++s) {
switch (*s) {
case '\\':
case '"':
case '{':
case '}':
case '<':
case '>':
case '|':
case ' ':
putchar('\\');
fallthrough;
default:
putchar(*s);
}
}
}
static const char *shorten_path(const char *path)
{
const unsigned int MAX_PATH_LEN = 32;
size_t len = strlen(path);
const char *shortpath;
if (len <= MAX_PATH_LEN)
return path;
/* Search for last '/' under the MAX_PATH_LEN limit */
shortpath = strchr(path + len - MAX_PATH_LEN, '/');
if (shortpath) {
if (shortpath < path + strlen("..."))
/* We removed a very short prefix, e.g. "/w", and we'll
* make the path longer by prefixing with the ellipsis.
* Not worth it, keep initial path.
*/
return path;
return shortpath;
}
/* File base name length is > MAX_PATH_LEN, search for last '/' */
shortpath = strrchr(path, '/');
if (shortpath)
return shortpath;
return path;
}
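/* Illustrative: a long path such as
 * "/home/user/src/linux/tools/bpf/bpftool/btf_dumper.c" shortens to a
 * suffix starting at a '/', which the caller prefixes with "...".
 */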
void btf_dump_linfo_dotlabel(const struct btf *btf,
const struct bpf_line_info *linfo, bool linum)
{
const char *line = btf__name_by_offset(btf, linfo->line_off);
if (!line || !strlen(line))
return;
line = ltrim(line);
if (linum) {
const char *file = btf__name_by_offset(btf, linfo->file_name_off);
const char *shortfile;
/* Be more forgiving about the file name: the linum option
 * is expected to provide more info than the already
 * available source line.
 */
if (!file)
shortfile = "";
else
shortfile = shorten_path(file);
printf("; [%s", shortfile > file ? "..." : "");
dotlabel_puts(shortfile);
printf(" line:%u col:%u]\\l\\\n",
BPF_LINE_INFO_LINE_NUM(linfo->line_col),
BPF_LINE_INFO_LINE_COL(linfo->line_col));
}
printf("; ");
dotlabel_puts(line);
printf("\\l\\\n");
}
| linux-master | tools/bpf/bpftool/btf_dumper.c |
// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
/* Copyright (C) 2019 Facebook */
#ifndef _GNU_SOURCE
#define _GNU_SOURCE
#endif
#include <ctype.h>
#include <errno.h>
#include <fcntl.h>
#include <linux/err.h>
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <bpf/bpf.h>
#include <bpf/libbpf.h>
#include <bpf/libbpf_internal.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <sys/mman.h>
#include <bpf/btf.h>
#include "json_writer.h"
#include "main.h"
#define MAX_OBJ_NAME_LEN 64
static void sanitize_identifier(char *name)
{
int i;
for (i = 0; name[i]; i++)
if (!isalnum(name[i]) && name[i] != '_')
name[i] = '_';
}
static bool str_has_prefix(const char *str, const char *prefix)
{
return strncmp(str, prefix, strlen(prefix)) == 0;
}
static bool str_has_suffix(const char *str, const char *suffix)
{
size_t i, n1 = strlen(str), n2 = strlen(suffix);
if (n1 < n2)
return false;
for (i = 0; i < n2; i++) {
if (str[n1 - i - 1] != suffix[n2 - i - 1])
return false;
}
return true;
}
static void get_obj_name(char *name, const char *file)
{
/* Use the GNU version of basename(), which doesn't modify its argument. */
strncpy(name, basename(file), MAX_OBJ_NAME_LEN - 1);
name[MAX_OBJ_NAME_LEN - 1] = '\0';
if (str_has_suffix(name, ".o"))
name[strlen(name) - 2] = '\0';
sanitize_identifier(name);
}
static void get_header_guard(char *guard, const char *obj_name, const char *suffix)
{
int i;
sprintf(guard, "__%s_%s__", obj_name, suffix);
for (i = 0; guard[i]; i++)
guard[i] = toupper(guard[i]);
}
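/* e.g. get_header_guard(buf, "my_obj", "SKEL_H") produces "__MY_OBJ_SKEL_H__" */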
static bool get_map_ident(const struct bpf_map *map, char *buf, size_t buf_sz)
{
static const char *sfxs[] = { ".data", ".rodata", ".bss", ".kconfig" };
const char *name = bpf_map__name(map);
int i, n;
if (!bpf_map__is_internal(map)) {
snprintf(buf, buf_sz, "%s", name);
return true;
}
for (i = 0, n = ARRAY_SIZE(sfxs); i < n; i++) {
const char *sfx = sfxs[i], *p;
p = strstr(name, sfx);
if (p) {
snprintf(buf, buf_sz, "%s", p + 1);
sanitize_identifier(buf);
return true;
}
}
return false;
}
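/* Illustrative: an internal map named "obj.rodata" matches the ".rodata"
 * suffix and yields the ident "rodata"; non-internal maps keep their name.
 */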
static bool get_datasec_ident(const char *sec_name, char *buf, size_t buf_sz)
{
static const char *pfxs[] = { ".data", ".rodata", ".bss", ".kconfig" };
int i, n;
for (i = 0, n = ARRAY_SIZE(pfxs); i < n; i++) {
const char *pfx = pfxs[i];
if (str_has_prefix(sec_name, pfx)) {
snprintf(buf, buf_sz, "%s", sec_name + 1);
sanitize_identifier(buf);
return true;
}
}
return false;
}
static void codegen_btf_dump_printf(void *ctx, const char *fmt, va_list args)
{
vprintf(fmt, args);
}
static int codegen_datasec_def(struct bpf_object *obj,
struct btf *btf,
struct btf_dump *d,
const struct btf_type *sec,
const char *obj_name)
{
const char *sec_name = btf__name_by_offset(btf, sec->name_off);
const struct btf_var_secinfo *sec_var = btf_var_secinfos(sec);
int i, err, off = 0, pad_cnt = 0, vlen = btf_vlen(sec);
char var_ident[256], sec_ident[256];
bool strip_mods = false;
if (!get_datasec_ident(sec_name, sec_ident, sizeof(sec_ident)))
return 0;
if (strcmp(sec_name, ".kconfig") != 0)
strip_mods = true;
printf(" struct %s__%s {\n", obj_name, sec_ident);
for (i = 0; i < vlen; i++, sec_var++) {
const struct btf_type *var = btf__type_by_id(btf, sec_var->type);
const char *var_name = btf__name_by_offset(btf, var->name_off);
DECLARE_LIBBPF_OPTS(btf_dump_emit_type_decl_opts, opts,
.field_name = var_ident,
.indent_level = 2,
.strip_mods = strip_mods,
);
int need_off = sec_var->offset, align_off, align;
__u32 var_type_id = var->type;
/* static variables are not exposed through BPF skeleton */
if (btf_var(var)->linkage == BTF_VAR_STATIC)
continue;
if (off > need_off) {
p_err("Something is wrong for %s's variable #%d: need offset %d, already at %d.\n",
sec_name, i, need_off, off);
return -EINVAL;
}
align = btf__align_of(btf, var->type);
if (align <= 0) {
p_err("Failed to determine alignment of variable '%s': %d",
var_name, align);
return -EINVAL;
}
/* Assume 32-bit architectures when generating data section
 * struct memory layout. Given bpftool can't know which target
 * host architecture it's emitting the skeleton for, we need to
 * be conservative and assume a 32-bit one to ensure enough
 * padding bytes are generated for pointer and long types. This
 * still works correctly for 64-bit architectures, because in
 * the worst case we'll generate an unnecessary padding field,
 * which on 64-bit architectures is not strictly needed and
 * would be handled by natural 8-byte alignment. But it will
 * still be a correct memory layout, based on the offsets
 * recorded in BTF.
 */
if (align > 4)
align = 4;
align_off = (off + align - 1) / align * align;
if (align_off != need_off) {
printf("\t\tchar __pad%d[%d];\n",
pad_cnt, need_off - off);
pad_cnt++;
}
/* Sanitize the variable name: e.g., for static vars inside
 * a function, its name is '<function name>.<variable name>',
 * which we turn into '<function name>_<variable name>'.
 */
var_ident[0] = '\0';
strncat(var_ident, var_name, sizeof(var_ident) - 1);
sanitize_identifier(var_ident);
printf("\t\t");
err = btf_dump__emit_type_decl(d, var_type_id, &opts);
if (err)
return err;
printf(";\n");
off = sec_var->offset + sec_var->size;
}
printf(" } *%s;\n", sec_ident);
return 0;
}
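/* Roughly, for a ".bss" section with one int variable, the code above
 * emits (illustrative):
 *
 *	struct obj_name__bss {
 *		int my_var;
 *	} *bss;
 */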
static const struct btf_type *find_type_for_map(struct btf *btf, const char *map_ident)
{
int n = btf__type_cnt(btf), i;
char sec_ident[256];
for (i = 1; i < n; i++) {
const struct btf_type *t = btf__type_by_id(btf, i);
const char *name;
if (!btf_is_datasec(t))
continue;
name = btf__str_by_offset(btf, t->name_off);
if (!get_datasec_ident(name, sec_ident, sizeof(sec_ident)))
continue;
if (strcmp(sec_ident, map_ident) == 0)
return t;
}
return NULL;
}
static bool is_internal_mmapable_map(const struct bpf_map *map, char *buf, size_t sz)
{
if (!bpf_map__is_internal(map) || !(bpf_map__map_flags(map) & BPF_F_MMAPABLE))
return false;
if (!get_map_ident(map, buf, sz))
return false;
return true;
}
static int codegen_datasecs(struct bpf_object *obj, const char *obj_name)
{
struct btf *btf = bpf_object__btf(obj);
struct btf_dump *d;
struct bpf_map *map;
const struct btf_type *sec;
char map_ident[256];
int err = 0;
d = btf_dump__new(btf, codegen_btf_dump_printf, NULL, NULL);
if (!d)
return -errno;
bpf_object__for_each_map(map, obj) {
/* only generate definitions for memory-mapped internal maps */
if (!is_internal_mmapable_map(map, map_ident, sizeof(map_ident)))
continue;
sec = find_type_for_map(btf, map_ident);
/* In some cases (e.g., sections like .rodata.cst16 containing
 * only compiler-allocated string constants) there will be
 * special internal maps with no corresponding DATASEC BTF
 * type. In such cases, generate an empty struct for each such
 * map. It will still be memory-mapped and its contents
 * accessible from user space through the BPF skeleton.
 */
if (!sec) {
printf(" struct %s__%s {\n", obj_name, map_ident);
printf(" } *%s;\n", map_ident);
} else {
err = codegen_datasec_def(obj, btf, d, sec, obj_name);
if (err)
goto out;
}
}
out:
btf_dump__free(d);
return err;
}
static bool btf_is_ptr_to_func_proto(const struct btf *btf,
const struct btf_type *v)
{
return btf_is_ptr(v) && btf_is_func_proto(btf__type_by_id(btf, v->type));
}
static int codegen_subskel_datasecs(struct bpf_object *obj, const char *obj_name)
{
struct btf *btf = bpf_object__btf(obj);
struct btf_dump *d;
struct bpf_map *map;
const struct btf_type *sec, *var;
const struct btf_var_secinfo *sec_var;
int i, err = 0, vlen;
char map_ident[256], sec_ident[256];
bool strip_mods = false, needs_typeof = false;
const char *sec_name, *var_name;
__u32 var_type_id;
d = btf_dump__new(btf, codegen_btf_dump_printf, NULL, NULL);
if (!d)
return -errno;
bpf_object__for_each_map(map, obj) {
/* only generate definitions for memory-mapped internal maps */
if (!is_internal_mmapable_map(map, map_ident, sizeof(map_ident)))
continue;
sec = find_type_for_map(btf, map_ident);
if (!sec)
continue;
sec_name = btf__name_by_offset(btf, sec->name_off);
if (!get_datasec_ident(sec_name, sec_ident, sizeof(sec_ident)))
continue;
strip_mods = strcmp(sec_name, ".kconfig") != 0;
printf(" struct %s__%s {\n", obj_name, sec_ident);
sec_var = btf_var_secinfos(sec);
vlen = btf_vlen(sec);
for (i = 0; i < vlen; i++, sec_var++) {
DECLARE_LIBBPF_OPTS(btf_dump_emit_type_decl_opts, opts,
.indent_level = 2,
.strip_mods = strip_mods,
/* we'll print the name separately */
.field_name = "",
);
var = btf__type_by_id(btf, sec_var->type);
var_name = btf__name_by_offset(btf, var->name_off);
var_type_id = var->type;
/* static variables are not exposed through BPF skeleton */
if (btf_var(var)->linkage == BTF_VAR_STATIC)
continue;
/* The datasec member has KIND_VAR but we want the
* underlying type of the variable (e.g. KIND_INT).
*/
var = skip_mods_and_typedefs(btf, var->type, NULL);
printf("\t\t");
/* Func and array members require special handling.
* Instead of producing `typename *var`, they produce
* `typeof(typename) *var`. This allows us to keep a
* similar syntax where the identifier is just prefixed
* by *, allowing us to ignore C declaration minutiae.
*/
needs_typeof = btf_is_array(var) || btf_is_ptr_to_func_proto(btf, var);
if (needs_typeof)
printf("typeof(");
err = btf_dump__emit_type_decl(d, var_type_id, &opts);
if (err)
goto out;
if (needs_typeof)
printf(")");
printf(" *%s;\n", var_name);
}
printf(" } %s;\n", sec_ident);
}
out:
btf_dump__free(d);
return err;
}
static void codegen(const char *template, ...)
{
const char *src, *end;
int skip_tabs = 0, n;
char *s, *dst;
va_list args;
char c;
n = strlen(template);
s = malloc(n + 1);
if (!s)
exit(-1);
src = template;
dst = s;
/* find out "baseline" indentation to skip */
while ((c = *src++)) {
if (c == '\t') {
skip_tabs++;
} else if (c == '\n') {
break;
} else {
p_err("unrecognized character at pos %td in template '%s': '%c'",
src - template - 1, template, c);
free(s);
exit(-1);
}
}
while (*src) {
/* skip baseline indentation tabs */
for (n = skip_tabs; n > 0; n--, src++) {
if (*src != '\t') {
p_err("not enough tabs at pos %td in template '%s'",
src - template - 1, template);
free(s);
exit(-1);
}
}
/* trim trailing whitespace */
end = strchrnul(src, '\n');
for (n = end - src; n > 0 && isspace(src[n - 1]); n--)
;
memcpy(dst, src, n);
dst += n;
if (*end)
*dst++ = '\n';
src = *end ? end + 1 : end;
}
*dst++ = '\0';
/* print out using adjusted template */
va_start(args, template);
n = vprintf(s, args);
va_end(args);
free(s);
}
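/* Typical call (illustrative): every template line carries the same
 * baseline indentation, which is stripped before printing:
 *
 *	codegen("\
 *		\n\
 *		struct %1$s {	\n\
 *		};		\n\
 *		", obj_name);
 */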
static void print_hex(const char *data, int data_sz)
{
int i, len;
for (i = 0, len = 0; i < data_sz; i++) {
int w = data[i] ? 4 : 2;
len += w;
if (len > 78) {
printf("\\\n");
len = w;
}
if (!data[i])
printf("\\0");
else
printf("\\x%02x", (unsigned char)data[i]);
}
}
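/* e.g. bytes { 0x7f, 0x45, 0x00 } print as "\x7f\x45\0"; a "\\\n" break
 * is emitted once an output line would grow past 78 characters.
 */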
static size_t bpf_map_mmap_sz(const struct bpf_map *map)
{
long page_sz = sysconf(_SC_PAGE_SIZE);
size_t map_sz;
map_sz = (size_t)roundup(bpf_map__value_size(map), 8) * bpf_map__max_entries(map);
map_sz = roundup(map_sz, page_sz);
return map_sz;
}
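/* Worked example (4096-byte pages, illustrative): value_size = 4 and
 * max_entries = 3 give roundup(4, 8) * 3 = 24 bytes, rounded up to one
 * 4096-byte page.
 */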
/* Emit type size asserts for all top-level fields in memory-mapped internal maps. */
static void codegen_asserts(struct bpf_object *obj, const char *obj_name)
{
struct btf *btf = bpf_object__btf(obj);
struct bpf_map *map;
struct btf_var_secinfo *sec_var;
int i, vlen;
const struct btf_type *sec;
char map_ident[256], var_ident[256];
if (!btf)
return;
codegen("\
\n\
__attribute__((unused)) static void \n\
%1$s__assert(struct %1$s *s __attribute__((unused))) \n\
{ \n\
#ifdef __cplusplus \n\
#define _Static_assert static_assert \n\
#endif \n\
", obj_name);
bpf_object__for_each_map(map, obj) {
if (!is_internal_mmapable_map(map, map_ident, sizeof(map_ident)))
continue;
sec = find_type_for_map(btf, map_ident);
if (!sec) {
/* best effort, couldn't find the type for this map */
continue;
}
sec_var = btf_var_secinfos(sec);
vlen = btf_vlen(sec);
for (i = 0; i < vlen; i++, sec_var++) {
const struct btf_type *var = btf__type_by_id(btf, sec_var->type);
const char *var_name = btf__name_by_offset(btf, var->name_off);
long var_size;
/* static variables are not exposed through BPF skeleton */
if (btf_var(var)->linkage == BTF_VAR_STATIC)
continue;
var_size = btf__resolve_size(btf, var->type);
if (var_size < 0)
continue;
var_ident[0] = '\0';
strncat(var_ident, var_name, sizeof(var_ident) - 1);
sanitize_identifier(var_ident);
printf("\t_Static_assert(sizeof(s->%s->%s) == %ld, \"unexpected size of '%s'\");\n",
map_ident, var_ident, var_size, var_ident);
}
}
codegen("\
\n\
#ifdef __cplusplus \n\
#undef _Static_assert \n\
#endif \n\
} \n\
");
}
static void codegen_attach_detach(struct bpf_object *obj, const char *obj_name)
{
struct bpf_program *prog;
bpf_object__for_each_program(prog, obj) {
const char *tp_name;
codegen("\
\n\
\n\
static inline int \n\
%1$s__%2$s__attach(struct %1$s *skel) \n\
{ \n\
int prog_fd = skel->progs.%2$s.prog_fd; \n\
", obj_name, bpf_program__name(prog));
switch (bpf_program__type(prog)) {
case BPF_PROG_TYPE_RAW_TRACEPOINT:
tp_name = strchr(bpf_program__section_name(prog), '/') + 1;
printf("\tint fd = skel_raw_tracepoint_open(\"%s\", prog_fd);\n", tp_name);
break;
case BPF_PROG_TYPE_TRACING:
case BPF_PROG_TYPE_LSM:
if (bpf_program__expected_attach_type(prog) == BPF_TRACE_ITER)
printf("\tint fd = skel_link_create(prog_fd, 0, BPF_TRACE_ITER);\n");
else
printf("\tint fd = skel_raw_tracepoint_open(NULL, prog_fd);\n");
break;
default:
printf("\tint fd = ((void)prog_fd, 0); /* auto-attach not supported */\n");
break;
}
codegen("\
\n\
\n\
if (fd > 0) \n\
skel->links.%1$s_fd = fd; \n\
return fd; \n\
} \n\
", bpf_program__name(prog));
}
codegen("\
\n\
\n\
static inline int \n\
%1$s__attach(struct %1$s *skel) \n\
{ \n\
int ret = 0; \n\
\n\
", obj_name);
bpf_object__for_each_program(prog, obj) {
codegen("\
\n\
ret = ret < 0 ? ret : %1$s__%2$s__attach(skel); \n\
", obj_name, bpf_program__name(prog));
}
codegen("\
\n\
return ret < 0 ? ret : 0; \n\
} \n\
\n\
static inline void \n\
%1$s__detach(struct %1$s *skel) \n\
{ \n\
", obj_name);
bpf_object__for_each_program(prog, obj) {
codegen("\
\n\
skel_closenz(skel->links.%1$s_fd); \n\
", bpf_program__name(prog));
}
codegen("\
\n\
} \n\
");
}
static void codegen_destroy(struct bpf_object *obj, const char *obj_name)
{
struct bpf_program *prog;
struct bpf_map *map;
char ident[256];
codegen("\
\n\
static void \n\
%1$s__destroy(struct %1$s *skel) \n\
{ \n\
if (!skel) \n\
return; \n\
%1$s__detach(skel); \n\
",
obj_name);
bpf_object__for_each_program(prog, obj) {
codegen("\
\n\
skel_closenz(skel->progs.%1$s.prog_fd); \n\
", bpf_program__name(prog));
}
bpf_object__for_each_map(map, obj) {
if (!get_map_ident(map, ident, sizeof(ident)))
continue;
if (bpf_map__is_internal(map) &&
(bpf_map__map_flags(map) & BPF_F_MMAPABLE))
printf("\tskel_free_map_data(skel->%1$s, skel->maps.%1$s.initial_value, %2$zd);\n",
ident, bpf_map_mmap_sz(map));
codegen("\
\n\
skel_closenz(skel->maps.%1$s.map_fd); \n\
", ident);
}
codegen("\
\n\
skel_free(skel); \n\
} \n\
",
obj_name);
}
static int gen_trace(struct bpf_object *obj, const char *obj_name, const char *header_guard)
{
DECLARE_LIBBPF_OPTS(gen_loader_opts, opts);
struct bpf_map *map;
char ident[256];
int err = 0;
err = bpf_object__gen_loader(obj, &opts);
if (err)
return err;
err = bpf_object__load(obj);
if (err) {
p_err("failed to load object file");
goto out;
}
/* If there was no error during load then the gen_loader_opts
 * structure is populated with the loader program.
 */
/* finish generating 'struct skel' */
codegen("\
\n\
}; \n\
", obj_name);
codegen_attach_detach(obj, obj_name);
codegen_destroy(obj, obj_name);
codegen("\
\n\
static inline struct %1$s * \n\
%1$s__open(void) \n\
{ \n\
struct %1$s *skel; \n\
\n\
skel = skel_alloc(sizeof(*skel)); \n\
if (!skel) \n\
goto cleanup; \n\
skel->ctx.sz = (void *)&skel->links - (void *)skel; \n\
",
obj_name, opts.data_sz);
bpf_object__for_each_map(map, obj) {
const void *mmap_data = NULL;
size_t mmap_size = 0;
if (!is_internal_mmapable_map(map, ident, sizeof(ident)))
continue;
codegen("\
\n\
skel->%1$s = skel_prep_map_data((void *)\"\\ \n\
", ident);
mmap_data = bpf_map__initial_value(map, &mmap_size);
print_hex(mmap_data, mmap_size);
codegen("\
\n\
\", %1$zd, %2$zd); \n\
if (!skel->%3$s) \n\
goto cleanup; \n\
skel->maps.%3$s.initial_value = (__u64) (long) skel->%3$s;\n\
", bpf_map_mmap_sz(map), mmap_size, ident);
}
codegen("\
\n\
return skel; \n\
cleanup: \n\
%1$s__destroy(skel); \n\
return NULL; \n\
} \n\
\n\
static inline int \n\
%1$s__load(struct %1$s *skel) \n\
{ \n\
struct bpf_load_and_run_opts opts = {}; \n\
int err; \n\
\n\
opts.ctx = (struct bpf_loader_ctx *)skel; \n\
opts.data_sz = %2$d; \n\
opts.data = (void *)\"\\ \n\
",
obj_name, opts.data_sz);
print_hex(opts.data, opts.data_sz);
codegen("\
\n\
\"; \n\
");
codegen("\
\n\
opts.insns_sz = %d; \n\
opts.insns = (void *)\"\\ \n\
",
opts.insns_sz);
print_hex(opts.insns, opts.insns_sz);
codegen("\
\n\
\"; \n\
err = bpf_load_and_run(&opts); \n\
if (err < 0) \n\
return err; \n\
", obj_name);
bpf_object__for_each_map(map, obj) {
const char *mmap_flags;
if (!is_internal_mmapable_map(map, ident, sizeof(ident)))
continue;
if (bpf_map__map_flags(map) & BPF_F_RDONLY_PROG)
mmap_flags = "PROT_READ";
else
mmap_flags = "PROT_READ | PROT_WRITE";
codegen("\
\n\
skel->%1$s = skel_finalize_map_data(&skel->maps.%1$s.initial_value, \n\
%2$zd, %3$s, skel->maps.%1$s.map_fd);\n\
if (!skel->%1$s) \n\
return -ENOMEM; \n\
",
ident, bpf_map_mmap_sz(map), mmap_flags);
}
codegen("\
\n\
return 0; \n\
} \n\
\n\
static inline struct %1$s * \n\
%1$s__open_and_load(void) \n\
{ \n\
struct %1$s *skel; \n\
\n\
skel = %1$s__open(); \n\
if (!skel) \n\
return NULL; \n\
if (%1$s__load(skel)) { \n\
%1$s__destroy(skel); \n\
return NULL; \n\
} \n\
return skel; \n\
} \n\
\n\
", obj_name);
codegen_asserts(obj, obj_name);
codegen("\
\n\
\n\
#endif /* %s */ \n\
",
header_guard);
err = 0;
out:
return err;
}
static void
codegen_maps_skeleton(struct bpf_object *obj, size_t map_cnt, bool mmaped)
{
struct bpf_map *map;
char ident[256];
size_t i;
if (!map_cnt)
return;
codegen("\
\n\
\n\
/* maps */ \n\
s->map_cnt = %zu; \n\
s->map_skel_sz = sizeof(*s->maps); \n\
s->maps = (struct bpf_map_skeleton *)calloc(s->map_cnt, s->map_skel_sz);\n\
if (!s->maps) { \n\
err = -ENOMEM; \n\
goto err; \n\
} \n\
",
map_cnt
);
i = 0;
bpf_object__for_each_map(map, obj) {
if (!get_map_ident(map, ident, sizeof(ident)))
continue;
codegen("\
\n\
\n\
s->maps[%zu].name = \"%s\"; \n\
s->maps[%zu].map = &obj->maps.%s; \n\
",
i, bpf_map__name(map), i, ident);
/* memory-mapped internal maps */
if (mmaped && is_internal_mmapable_map(map, ident, sizeof(ident))) {
printf("\ts->maps[%zu].mmaped = (void **)&obj->%s;\n",
i, ident);
}
i++;
}
}
static void
codegen_progs_skeleton(struct bpf_object *obj, size_t prog_cnt, bool populate_links)
{
struct bpf_program *prog;
int i;
if (!prog_cnt)
return;
codegen("\
\n\
\n\
/* programs */ \n\
s->prog_cnt = %zu; \n\
s->prog_skel_sz = sizeof(*s->progs); \n\
s->progs = (struct bpf_prog_skeleton *)calloc(s->prog_cnt, s->prog_skel_sz);\n\
if (!s->progs) { \n\
err = -ENOMEM; \n\
goto err; \n\
} \n\
",
prog_cnt
);
i = 0;
bpf_object__for_each_program(prog, obj) {
codegen("\
\n\
\n\
s->progs[%1$zu].name = \"%2$s\"; \n\
s->progs[%1$zu].prog = &obj->progs.%2$s;\n\
",
i, bpf_program__name(prog));
if (populate_links) {
codegen("\
\n\
s->progs[%1$zu].link = &obj->links.%2$s;\n\
",
i, bpf_program__name(prog));
}
i++;
}
}
static int do_skeleton(int argc, char **argv)
{
char header_guard[MAX_OBJ_NAME_LEN + sizeof("__SKEL_H__")];
size_t map_cnt = 0, prog_cnt = 0, file_sz, mmap_sz;
DECLARE_LIBBPF_OPTS(bpf_object_open_opts, opts);
char obj_name[MAX_OBJ_NAME_LEN] = "", *obj_data;
struct bpf_object *obj = NULL;
const char *file;
char ident[256];
struct bpf_program *prog;
int fd, err = -1;
struct bpf_map *map;
struct btf *btf;
struct stat st;
if (!REQ_ARGS(1)) {
usage();
return -1;
}
file = GET_ARG();
while (argc) {
if (!REQ_ARGS(2))
return -1;
if (is_prefix(*argv, "name")) {
NEXT_ARG();
if (obj_name[0] != '\0') {
p_err("object name already specified");
return -1;
}
strncpy(obj_name, *argv, MAX_OBJ_NAME_LEN - 1);
obj_name[MAX_OBJ_NAME_LEN - 1] = '\0';
} else {
p_err("unknown arg %s", *argv);
return -1;
}
NEXT_ARG();
}
if (argc) {
p_err("extra unknown arguments");
return -1;
}
if (stat(file, &st)) {
p_err("failed to stat() %s: %s", file, strerror(errno));
return -1;
}
file_sz = st.st_size;
mmap_sz = roundup(file_sz, sysconf(_SC_PAGE_SIZE));
fd = open(file, O_RDONLY);
if (fd < 0) {
p_err("failed to open() %s: %s", file, strerror(errno));
return -1;
}
obj_data = mmap(NULL, mmap_sz, PROT_READ, MAP_PRIVATE, fd, 0);
if (obj_data == MAP_FAILED) {
obj_data = NULL;
p_err("failed to mmap() %s: %s", file, strerror(errno));
goto out;
}
if (obj_name[0] == '\0')
get_obj_name(obj_name, file);
opts.object_name = obj_name;
if (verifier_logs)
/* log_level1 + log_level2 + stats, but not stable UAPI */
opts.kernel_log_level = 1 + 2 + 4;
obj = bpf_object__open_mem(obj_data, file_sz, &opts);
if (!obj) {
char err_buf[256];
err = -errno;
libbpf_strerror(err, err_buf, sizeof(err_buf));
p_err("failed to open BPF object file: %s", err_buf);
goto out;
}
bpf_object__for_each_map(map, obj) {
if (!get_map_ident(map, ident, sizeof(ident))) {
p_err("ignoring unrecognized internal map '%s'...",
bpf_map__name(map));
continue;
}
map_cnt++;
}
bpf_object__for_each_program(prog, obj) {
prog_cnt++;
}
get_header_guard(header_guard, obj_name, "SKEL_H");
if (use_loader) {
codegen("\
\n\
/* SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) */ \n\
/* THIS FILE IS AUTOGENERATED BY BPFTOOL! */ \n\
#ifndef %2$s \n\
#define %2$s \n\
\n\
#include <bpf/skel_internal.h> \n\
\n\
struct %1$s { \n\
struct bpf_loader_ctx ctx; \n\
",
obj_name, header_guard
);
} else {
codegen("\
\n\
/* SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) */ \n\
\n\
/* THIS FILE IS AUTOGENERATED BY BPFTOOL! */ \n\
#ifndef %2$s \n\
#define %2$s \n\
\n\
#include <errno.h> \n\
#include <stdlib.h> \n\
#include <bpf/libbpf.h> \n\
\n\
struct %1$s { \n\
struct bpf_object_skeleton *skeleton; \n\
struct bpf_object *obj; \n\
",
obj_name, header_guard
);
}
if (map_cnt) {
printf("\tstruct {\n");
bpf_object__for_each_map(map, obj) {
if (!get_map_ident(map, ident, sizeof(ident)))
continue;
if (use_loader)
printf("\t\tstruct bpf_map_desc %s;\n", ident);
else
printf("\t\tstruct bpf_map *%s;\n", ident);
}
printf("\t} maps;\n");
}
if (prog_cnt) {
printf("\tstruct {\n");
bpf_object__for_each_program(prog, obj) {
if (use_loader)
printf("\t\tstruct bpf_prog_desc %s;\n",
bpf_program__name(prog));
else
printf("\t\tstruct bpf_program *%s;\n",
bpf_program__name(prog));
}
printf("\t} progs;\n");
printf("\tstruct {\n");
bpf_object__for_each_program(prog, obj) {
if (use_loader)
printf("\t\tint %s_fd;\n",
bpf_program__name(prog));
else
printf("\t\tstruct bpf_link *%s;\n",
bpf_program__name(prog));
}
printf("\t} links;\n");
}
btf = bpf_object__btf(obj);
if (btf) {
err = codegen_datasecs(obj, obj_name);
if (err)
goto out;
}
if (use_loader) {
err = gen_trace(obj, obj_name, header_guard);
goto out;
}
codegen("\
\n\
\n\
#ifdef __cplusplus \n\
static inline struct %1$s *open(const struct bpf_object_open_opts *opts = nullptr);\n\
static inline struct %1$s *open_and_load(); \n\
static inline int load(struct %1$s *skel); \n\
static inline int attach(struct %1$s *skel); \n\
static inline void detach(struct %1$s *skel); \n\
static inline void destroy(struct %1$s *skel); \n\
static inline const void *elf_bytes(size_t *sz); \n\
#endif /* __cplusplus */ \n\
}; \n\
\n\
static void \n\
%1$s__destroy(struct %1$s *obj) \n\
{ \n\
if (!obj) \n\
return; \n\
if (obj->skeleton) \n\
bpf_object__destroy_skeleton(obj->skeleton);\n\
free(obj); \n\
} \n\
\n\
static inline int \n\
%1$s__create_skeleton(struct %1$s *obj); \n\
\n\
static inline struct %1$s * \n\
%1$s__open_opts(const struct bpf_object_open_opts *opts) \n\
{ \n\
struct %1$s *obj; \n\
int err; \n\
\n\
obj = (struct %1$s *)calloc(1, sizeof(*obj)); \n\
if (!obj) { \n\
errno = ENOMEM; \n\
return NULL; \n\
} \n\
\n\
err = %1$s__create_skeleton(obj); \n\
if (err) \n\
goto err_out; \n\
\n\
err = bpf_object__open_skeleton(obj->skeleton, opts);\n\
if (err) \n\
goto err_out; \n\
\n\
return obj; \n\
err_out: \n\
%1$s__destroy(obj); \n\
errno = -err; \n\
return NULL; \n\
} \n\
\n\
static inline struct %1$s * \n\
%1$s__open(void) \n\
{ \n\
return %1$s__open_opts(NULL); \n\
} \n\
\n\
static inline int \n\
%1$s__load(struct %1$s *obj) \n\
{ \n\
return bpf_object__load_skeleton(obj->skeleton); \n\
} \n\
\n\
static inline struct %1$s * \n\
%1$s__open_and_load(void) \n\
{ \n\
struct %1$s *obj; \n\
int err; \n\
\n\
obj = %1$s__open(); \n\
if (!obj) \n\
return NULL; \n\
err = %1$s__load(obj); \n\
if (err) { \n\
%1$s__destroy(obj); \n\
errno = -err; \n\
return NULL; \n\
} \n\
return obj; \n\
} \n\
\n\
static inline int \n\
%1$s__attach(struct %1$s *obj) \n\
{ \n\
return bpf_object__attach_skeleton(obj->skeleton); \n\
} \n\
\n\
static inline void \n\
%1$s__detach(struct %1$s *obj) \n\
{ \n\
bpf_object__detach_skeleton(obj->skeleton); \n\
} \n\
",
obj_name
);
codegen("\
\n\
\n\
static inline const void *%1$s__elf_bytes(size_t *sz); \n\
\n\
static inline int \n\
%1$s__create_skeleton(struct %1$s *obj) \n\
{ \n\
struct bpf_object_skeleton *s; \n\
int err; \n\
\n\
s = (struct bpf_object_skeleton *)calloc(1, sizeof(*s));\n\
if (!s) { \n\
err = -ENOMEM; \n\
goto err; \n\
} \n\
\n\
s->sz = sizeof(*s); \n\
s->name = \"%1$s\"; \n\
s->obj = &obj->obj; \n\
",
obj_name
);
codegen_maps_skeleton(obj, map_cnt, true /*mmaped*/);
codegen_progs_skeleton(obj, prog_cnt, true /*populate_links*/);
codegen("\
\n\
\n\
s->data = (void *)%2$s__elf_bytes(&s->data_sz); \n\
\n\
obj->skeleton = s; \n\
return 0; \n\
err: \n\
bpf_object__destroy_skeleton(s); \n\
return err; \n\
} \n\
\n\
static inline const void *%2$s__elf_bytes(size_t *sz) \n\
{ \n\
*sz = %1$d; \n\
return (const void *)\"\\ \n\
"
, file_sz, obj_name);
/* embed contents of BPF object file */
print_hex(obj_data, file_sz);
codegen("\
\n\
\"; \n\
} \n\
\n\
#ifdef __cplusplus \n\
struct %1$s *%1$s::open(const struct bpf_object_open_opts *opts) { return %1$s__open_opts(opts); }\n\
struct %1$s *%1$s::open_and_load() { return %1$s__open_and_load(); } \n\
int %1$s::load(struct %1$s *skel) { return %1$s__load(skel); } \n\
int %1$s::attach(struct %1$s *skel) { return %1$s__attach(skel); } \n\
void %1$s::detach(struct %1$s *skel) { %1$s__detach(skel); } \n\
void %1$s::destroy(struct %1$s *skel) { %1$s__destroy(skel); } \n\
const void *%1$s::elf_bytes(size_t *sz) { return %1$s__elf_bytes(sz); } \n\
#endif /* __cplusplus */ \n\
\n\
",
obj_name);
codegen_asserts(obj, obj_name);
codegen("\
\n\
\n\
#endif /* %1$s */ \n\
",
header_guard);
err = 0;
out:
bpf_object__close(obj);
if (obj_data)
munmap(obj_data, mmap_sz);
close(fd);
return err;
}
/* Subskeletons are like skeletons, except they don't own the bpf_object,
* associated maps, links, etc. Instead, they know about the existence of
* variables, maps, programs and are able to find their locations
* _at runtime_ from an already loaded bpf_object.
*
* This allows for library-like BPF objects to have userspace counterparts
* with access to their own items without having to know anything about the
* final BPF object that the library was linked into.
*/
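/* Illustrative use of a generated subskeleton (names hypothetical):
 *
 *	struct shared *sub = shared__open(bpf_obj_the_lib_was_linked_into);
 *	if (sub)
 *		*sub->data.shared_counter = 0;
 *
 * where the header came from "bpftool gen subskeleton shared.bpf.o name shared".
 */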
static int do_subskeleton(int argc, char **argv)
{
char header_guard[MAX_OBJ_NAME_LEN + sizeof("__SUBSKEL_H__")];
size_t i, len, file_sz, map_cnt = 0, prog_cnt = 0, mmap_sz, var_cnt = 0, var_idx = 0;
DECLARE_LIBBPF_OPTS(bpf_object_open_opts, opts);
char obj_name[MAX_OBJ_NAME_LEN] = "", *obj_data;
struct bpf_object *obj = NULL;
const char *file, *var_name;
char ident[256];
int fd, err = -1, map_type_id;
const struct bpf_map *map;
struct bpf_program *prog;
struct btf *btf;
const struct btf_type *map_type, *var_type;
const struct btf_var_secinfo *var;
struct stat st;
if (!REQ_ARGS(1)) {
usage();
return -1;
}
file = GET_ARG();
while (argc) {
if (!REQ_ARGS(2))
return -1;
if (is_prefix(*argv, "name")) {
NEXT_ARG();
if (obj_name[0] != '\0') {
p_err("object name already specified");
return -1;
}
strncpy(obj_name, *argv, MAX_OBJ_NAME_LEN - 1);
obj_name[MAX_OBJ_NAME_LEN - 1] = '\0';
} else {
p_err("unknown arg %s", *argv);
return -1;
}
NEXT_ARG();
}
if (argc) {
p_err("extra unknown arguments");
return -1;
}
if (use_loader) {
p_err("cannot use loader for subskeletons");
return -1;
}
if (stat(file, &st)) {
p_err("failed to stat() %s: %s", file, strerror(errno));
return -1;
}
file_sz = st.st_size;
mmap_sz = roundup(file_sz, sysconf(_SC_PAGE_SIZE));
fd = open(file, O_RDONLY);
if (fd < 0) {
p_err("failed to open() %s: %s", file, strerror(errno));
return -1;
}
obj_data = mmap(NULL, mmap_sz, PROT_READ, MAP_PRIVATE, fd, 0);
if (obj_data == MAP_FAILED) {
obj_data = NULL;
p_err("failed to mmap() %s: %s", file, strerror(errno));
goto out;
}
if (obj_name[0] == '\0')
get_obj_name(obj_name, file);
/* The empty object name allows us to use bpf_map__name and produce
* ELF section names out of it. (".data" instead of "obj.data")
*/
opts.object_name = "";
obj = bpf_object__open_mem(obj_data, file_sz, &opts);
if (!obj) {
char err_buf[256];
libbpf_strerror(errno, err_buf, sizeof(err_buf));
p_err("failed to open BPF object file: %s", err_buf);
obj = NULL;
goto out;
}
btf = bpf_object__btf(obj);
if (!btf) {
err = -1;
p_err("need btf type information for %s", obj_name);
goto out;
}
bpf_object__for_each_program(prog, obj) {
prog_cnt++;
}
/* First, count how many variables we have to find.
* We need this in advance so the subskel can allocate the right
* amount of storage.
*/
bpf_object__for_each_map(map, obj) {
if (!get_map_ident(map, ident, sizeof(ident)))
continue;
/* Also count all maps that have a name */
map_cnt++;
if (!is_internal_mmapable_map(map, ident, sizeof(ident)))
continue;
map_type_id = bpf_map__btf_value_type_id(map);
if (map_type_id <= 0) {
err = map_type_id;
goto out;
}
map_type = btf__type_by_id(btf, map_type_id);
var = btf_var_secinfos(map_type);
len = btf_vlen(map_type);
for (i = 0; i < len; i++, var++) {
var_type = btf__type_by_id(btf, var->type);
if (btf_var(var_type)->linkage == BTF_VAR_STATIC)
continue;
var_cnt++;
}
}
get_header_guard(header_guard, obj_name, "SUBSKEL_H");
codegen("\
\n\
/* SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) */ \n\
\n\
/* THIS FILE IS AUTOGENERATED! */ \n\
#ifndef %2$s \n\
#define %2$s \n\
\n\
#include <errno.h> \n\
#include <stdlib.h> \n\
#include <bpf/libbpf.h> \n\
\n\
struct %1$s { \n\
struct bpf_object *obj; \n\
struct bpf_object_subskeleton *subskel; \n\
", obj_name, header_guard);
if (map_cnt) {
printf("\tstruct {\n");
bpf_object__for_each_map(map, obj) {
if (!get_map_ident(map, ident, sizeof(ident)))
continue;
printf("\t\tstruct bpf_map *%s;\n", ident);
}
printf("\t} maps;\n");
}
if (prog_cnt) {
printf("\tstruct {\n");
bpf_object__for_each_program(prog, obj) {
printf("\t\tstruct bpf_program *%s;\n",
bpf_program__name(prog));
}
printf("\t} progs;\n");
}
err = codegen_subskel_datasecs(obj, obj_name);
if (err)
goto out;
/* emit code that will allocate enough storage for all symbols */
codegen("\
\n\
\n\
#ifdef __cplusplus \n\
static inline struct %1$s *open(const struct bpf_object *src);\n\
static inline void destroy(struct %1$s *skel); \n\
#endif /* __cplusplus */ \n\
}; \n\
\n\
static inline void \n\
%1$s__destroy(struct %1$s *skel) \n\
{ \n\
if (!skel) \n\
return; \n\
if (skel->subskel) \n\
bpf_object__destroy_subskeleton(skel->subskel);\n\
free(skel); \n\
} \n\
\n\
static inline struct %1$s * \n\
%1$s__open(const struct bpf_object *src) \n\
{ \n\
struct %1$s *obj; \n\
struct bpf_object_subskeleton *s; \n\
int err; \n\
\n\
obj = (struct %1$s *)calloc(1, sizeof(*obj)); \n\
if (!obj) { \n\
err = -ENOMEM; \n\
goto err; \n\
} \n\
s = (struct bpf_object_subskeleton *)calloc(1, sizeof(*s));\n\
if (!s) { \n\
err = -ENOMEM; \n\
goto err; \n\
} \n\
s->sz = sizeof(*s); \n\
s->obj = src; \n\
s->var_skel_sz = sizeof(*s->vars); \n\
obj->subskel = s; \n\
\n\
/* vars */ \n\
s->var_cnt = %2$d; \n\
s->vars = (struct bpf_var_skeleton *)calloc(%2$d, sizeof(*s->vars));\n\
if (!s->vars) { \n\
err = -ENOMEM; \n\
goto err; \n\
} \n\
",
obj_name, var_cnt
);
/* walk through each symbol and emit the runtime representation */
bpf_object__for_each_map(map, obj) {
if (!is_internal_mmapable_map(map, ident, sizeof(ident)))
continue;
map_type_id = bpf_map__btf_value_type_id(map);
if (map_type_id <= 0)
/* skip over internal maps with no type */
continue;
map_type = btf__type_by_id(btf, map_type_id);
var = btf_var_secinfos(map_type);
len = btf_vlen(map_type);
for (i = 0; i < len; i++, var++) {
var_type = btf__type_by_id(btf, var->type);
var_name = btf__name_by_offset(btf, var_type->name_off);
if (btf_var(var_type)->linkage == BTF_VAR_STATIC)
continue;
/* Note that we use the dot prefix of ".data" as the field
 * access operator, i.e. maps%s becomes maps.data.
 */
codegen("\
\n\
\n\
s->vars[%3$d].name = \"%1$s\"; \n\
s->vars[%3$d].map = &obj->maps.%2$s; \n\
s->vars[%3$d].addr = (void **) &obj->%2$s.%1$s;\n\
", var_name, ident, var_idx);
var_idx++;
}
}
codegen_maps_skeleton(obj, map_cnt, false /*mmaped*/);
codegen_progs_skeleton(obj, prog_cnt, false /*links*/);
codegen("\
\n\
\n\
err = bpf_object__open_subskeleton(s); \n\
if (err) \n\
goto err; \n\
\n\
return obj; \n\
err: \n\
%1$s__destroy(obj); \n\
errno = -err; \n\
return NULL; \n\
} \n\
\n\
#ifdef __cplusplus \n\
struct %1$s *%1$s::open(const struct bpf_object *src) { return %1$s__open(src); }\n\
void %1$s::destroy(struct %1$s *skel) { %1$s__destroy(skel); }\n\
#endif /* __cplusplus */ \n\
\n\
#endif /* %2$s */ \n\
",
obj_name, header_guard);
err = 0;
out:
bpf_object__close(obj);
if (obj_data)
munmap(obj_data, mmap_sz);
close(fd);
return err;
}
static int do_object(int argc, char **argv)
{
struct bpf_linker *linker;
const char *output_file, *file;
int err = 0;
if (!REQ_ARGS(2)) {
usage();
return -1;
}
output_file = GET_ARG();
linker = bpf_linker__new(output_file, NULL);
if (!linker) {
p_err("failed to create BPF linker instance");
return -1;
}
while (argc) {
file = GET_ARG();
err = bpf_linker__add_file(linker, file, NULL);
if (err) {
p_err("failed to link '%s': %s (%d)", file, strerror(errno), errno);
goto out;
}
}
err = bpf_linker__finalize(linker);
if (err) {
p_err("failed to finalize ELF file: %s (%d)", strerror(errno), errno);
goto out;
}
err = 0;
out:
bpf_linker__free(linker);
return err;
}
static int do_help(int argc, char **argv)
{
if (json_output) {
jsonw_null(json_wtr);
return 0;
}
fprintf(stderr,
"Usage: %1$s %2$s object OUTPUT_FILE INPUT_FILE [INPUT_FILE...]\n"
" %1$s %2$s skeleton FILE [name OBJECT_NAME]\n"
" %1$s %2$s subskeleton FILE [name OBJECT_NAME]\n"
" %1$s %2$s min_core_btf INPUT OUTPUT OBJECT [OBJECT...]\n"
" %1$s %2$s help\n"
"\n"
" " HELP_SPEC_OPTIONS " |\n"
" {-L|--use-loader} }\n"
"",
bin_name, "gen");
return 0;
}
static int btf_save_raw(const struct btf *btf, const char *path)
{
const void *data;
FILE *f = NULL;
__u32 data_sz;
int err = 0;
data = btf__raw_data(btf, &data_sz);
if (!data)
return -ENOMEM;
f = fopen(path, "wb");
if (!f)
return -errno;
if (fwrite(data, 1, data_sz, f) != data_sz)
err = -errno;
fclose(f);
return err;
}
struct btfgen_info {
struct btf *src_btf;
struct btf *marked_btf; /* btf structure used to mark used types */
};
static size_t btfgen_hash_fn(long key, void *ctx)
{
return key;
}
static bool btfgen_equal_fn(long k1, long k2, void *ctx)
{
return k1 == k2;
}
static void btfgen_free_info(struct btfgen_info *info)
{
if (!info)
return;
btf__free(info->src_btf);
btf__free(info->marked_btf);
free(info);
}
static struct btfgen_info *
btfgen_new_info(const char *targ_btf_path)
{
struct btfgen_info *info;
int err;
info = calloc(1, sizeof(*info));
if (!info)
return NULL;
info->src_btf = btf__parse(targ_btf_path, NULL);
if (!info->src_btf) {
err = -errno;
p_err("failed parsing '%s' BTF file: %s", targ_btf_path, strerror(errno));
goto err_out;
}
info->marked_btf = btf__parse(targ_btf_path, NULL);
if (!info->marked_btf) {
err = -errno;
p_err("failed parsing '%s' BTF file: %s", targ_btf_path, strerror(errno));
goto err_out;
}
return info;
err_out:
btfgen_free_info(info);
errno = -err;
return NULL;
}
#define MARKED UINT32_MAX
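/* MARKED is stored into the name_off of every used type (and member) in
 * marked_btf; since it can't be a valid string offset, later processing
 * can tell used entries apart from unused ones.
 */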
static void btfgen_mark_member(struct btfgen_info *info, int type_id, int idx)
{
const struct btf_type *t = btf__type_by_id(info->marked_btf, type_id);
struct btf_member *m = btf_members(t) + idx;
m->name_off = MARKED;
}
static int
btfgen_mark_type(struct btfgen_info *info, unsigned int type_id, bool follow_pointers)
{
const struct btf_type *btf_type = btf__type_by_id(info->src_btf, type_id);
struct btf_type *cloned_type;
struct btf_param *param;
struct btf_array *array;
int err, i;
if (type_id == 0)
return 0;
/* mark type on cloned BTF as used */
cloned_type = (struct btf_type *) btf__type_by_id(info->marked_btf, type_id);
cloned_type->name_off = MARKED;
/* recursively mark other types needed by it */
switch (btf_kind(btf_type)) {
case BTF_KIND_UNKN:
case BTF_KIND_INT:
case BTF_KIND_FLOAT:
case BTF_KIND_ENUM:
case BTF_KIND_ENUM64:
case BTF_KIND_STRUCT:
case BTF_KIND_UNION:
break;
case BTF_KIND_PTR:
if (follow_pointers) {
err = btfgen_mark_type(info, btf_type->type, follow_pointers);
if (err)
return err;
}
break;
case BTF_KIND_CONST:
case BTF_KIND_RESTRICT:
case BTF_KIND_VOLATILE:
case BTF_KIND_TYPEDEF:
err = btfgen_mark_type(info, btf_type->type, follow_pointers);
if (err)
return err;
break;
case BTF_KIND_ARRAY:
array = btf_array(btf_type);
/* mark array type */
err = btfgen_mark_type(info, array->type, follow_pointers);
/* mark array's index type */
err = err ? : btfgen_mark_type(info, array->index_type, follow_pointers);
if (err)
return err;
break;
case BTF_KIND_FUNC_PROTO:
/* mark ret type */
err = btfgen_mark_type(info, btf_type->type, follow_pointers);
if (err)
return err;
/* mark parameters types */
param = btf_params(btf_type);
for (i = 0; i < btf_vlen(btf_type); i++) {
err = btfgen_mark_type(info, param->type, follow_pointers);
if (err)
return err;
param++;
}
break;
/* reject any other kind that would still need explicit handling */
default:
p_err("unsupported kind: %s (%d)", btf_kind_str(btf_type), type_id);
return -EINVAL;
}
return 0;
}
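/* Mark the root type of a field-based relocation, then every member and
 * member type reached while walking the accessors of its target spec.
 */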
static int btfgen_record_field_relo(struct btfgen_info *info, struct bpf_core_spec *targ_spec)
{
struct btf *btf = info->src_btf;
const struct btf_type *btf_type;
struct btf_member *btf_member;
struct btf_array *array;
unsigned int type_id = targ_spec->root_type_id;
int idx, err;
/* mark root type */
btf_type = btf__type_by_id(btf, type_id);
err = btfgen_mark_type(info, type_id, false);
if (err)
return err;
/* mark types for complex types (arrays, unions, structures) */
for (int i = 1; i < targ_spec->raw_len; i++) {
/* skip typedefs and mods */
while (btf_is_mod(btf_type) || btf_is_typedef(btf_type)) {
type_id = btf_type->type;
btf_type = btf__type_by_id(btf, type_id);
}
switch (btf_kind(btf_type)) {
case BTF_KIND_STRUCT:
case BTF_KIND_UNION:
idx = targ_spec->raw_spec[i];
btf_member = btf_members(btf_type) + idx;
/* mark member */
btfgen_mark_member(info, type_id, idx);
/* mark member's type */
type_id = btf_member->type;
btf_type = btf__type_by_id(btf, type_id);
err = btfgen_mark_type(info, type_id, false);
if (err)
return err;
break;
case BTF_KIND_ARRAY:
array = btf_array(btf_type);
type_id = array->type;
btf_type = btf__type_by_id(btf, type_id);
break;
default:
p_err("unsupported kind: %s (%d)",
btf_kind_str(btf_type), btf_type->type);
return -EINVAL;
}
}
return 0;
}
/* Mark types, members, and member types. Compared to btfgen_record_field_relo,
* this function does not rely on the target spec for inferring members, but
* uses the associated BTF.
*
* The `behind_ptr` argument is used to stop marking of composite types reached
* through a pointer. This way, we can keep BTF size in check while providing
* reasonable match semantics.
*/
static int btfgen_mark_type_match(struct btfgen_info *info, __u32 type_id, bool behind_ptr)
{
const struct btf_type *btf_type;
struct btf *btf = info->src_btf;
struct btf_type *cloned_type;
int i, err;
if (type_id == 0)
return 0;
btf_type = btf__type_by_id(btf, type_id);
/* mark type on cloned BTF as used */
cloned_type = (struct btf_type *)btf__type_by_id(info->marked_btf, type_id);
cloned_type->name_off = MARKED;
switch (btf_kind(btf_type)) {
case BTF_KIND_UNKN:
case BTF_KIND_INT:
case BTF_KIND_FLOAT:
case BTF_KIND_ENUM:
case BTF_KIND_ENUM64:
break;
case BTF_KIND_STRUCT:
case BTF_KIND_UNION: {
struct btf_member *m = btf_members(btf_type);
__u16 vlen = btf_vlen(btf_type);
if (behind_ptr)
break;
for (i = 0; i < vlen; i++, m++) {
/* mark member */
btfgen_mark_member(info, type_id, i);
/* mark member's type */
err = btfgen_mark_type_match(info, m->type, false);
if (err)
return err;
}
break;
}
case BTF_KIND_CONST:
case BTF_KIND_FWD:
case BTF_KIND_RESTRICT:
case BTF_KIND_TYPEDEF:
case BTF_KIND_VOLATILE:
return btfgen_mark_type_match(info, btf_type->type, behind_ptr);
case BTF_KIND_PTR:
return btfgen_mark_type_match(info, btf_type->type, true);
case BTF_KIND_ARRAY: {
struct btf_array *array;
array = btf_array(btf_type);
/* mark array type */
err = btfgen_mark_type_match(info, array->type, false);
/* mark array's index type */
err = err ? : btfgen_mark_type_match(info, array->index_type, false);
if (err)
return err;
break;
}
case BTF_KIND_FUNC_PROTO: {
__u16 vlen = btf_vlen(btf_type);
struct btf_param *param;
/* mark ret type */
err = btfgen_mark_type_match(info, btf_type->type, false);
if (err)
return err;
/* mark parameters types */
param = btf_params(btf_type);
for (i = 0; i < vlen; i++) {
err = btfgen_mark_type_match(info, param->type, false);
if (err)
return err;
param++;
}
break;
}
/* reject any other kind that would still need explicit handling */
default:
p_err("unsupported kind: %s (%d)", btf_kind_str(btf_type), type_id);
return -EINVAL;
}
return 0;
}
/* Mark types, members, and member types. Compared to btfgen_record_field_relo,
* this function does not rely on the target spec for inferring members, but
* uses the associated BTF.
*/
static int btfgen_record_type_match_relo(struct btfgen_info *info, struct bpf_core_spec *targ_spec)
{
return btfgen_mark_type_match(info, targ_spec->root_type_id, false);
}
static int btfgen_record_type_relo(struct btfgen_info *info, struct bpf_core_spec *targ_spec)
{
return btfgen_mark_type(info, targ_spec->root_type_id, true);
}
static int btfgen_record_enumval_relo(struct btfgen_info *info, struct bpf_core_spec *targ_spec)
{
return btfgen_mark_type(info, targ_spec->root_type_id, false);
}
static int btfgen_record_reloc(struct btfgen_info *info, struct bpf_core_spec *res)
{
switch (res->relo_kind) {
case BPF_CORE_FIELD_BYTE_OFFSET:
case BPF_CORE_FIELD_BYTE_SIZE:
case BPF_CORE_FIELD_EXISTS:
case BPF_CORE_FIELD_SIGNED:
case BPF_CORE_FIELD_LSHIFT_U64:
case BPF_CORE_FIELD_RSHIFT_U64:
return btfgen_record_field_relo(info, res);
case BPF_CORE_TYPE_ID_LOCAL: /* BPF_CORE_TYPE_ID_LOCAL doesn't require kernel BTF */
return 0;
case BPF_CORE_TYPE_ID_TARGET:
case BPF_CORE_TYPE_EXISTS:
case BPF_CORE_TYPE_SIZE:
return btfgen_record_type_relo(info, res);
case BPF_CORE_TYPE_MATCHES:
return btfgen_record_type_match_relo(info, res);
case BPF_CORE_ENUMVAL_EXISTS:
case BPF_CORE_ENUMVAL_VALUE:
return btfgen_record_enumval_relo(info, res);
default:
return -EINVAL;
}
}
static struct bpf_core_cand_list *
btfgen_find_cands(const struct btf *local_btf, const struct btf *targ_btf, __u32 local_id)
{
const struct btf_type *local_type;
struct bpf_core_cand_list *cands = NULL;
struct bpf_core_cand local_cand = {};
size_t local_essent_len;
const char *local_name;
int err;
local_cand.btf = local_btf;
local_cand.id = local_id;
local_type = btf__type_by_id(local_btf, local_id);
if (!local_type) {
err = -EINVAL;
goto err_out;
}
local_name = btf__name_by_offset(local_btf, local_type->name_off);
if (!local_name) {
err = -EINVAL;
goto err_out;
}
local_essent_len = bpf_core_essential_name_len(local_name);
cands = calloc(1, sizeof(*cands));
if (!cands)
return NULL;
err = bpf_core_add_cands(&local_cand, local_essent_len, targ_btf, "vmlinux", 1, cands);
if (err)
goto err_out;
return cands;
err_out:
bpf_core_free_cands(cands);
errno = -err;
return NULL;
}
/* Record relocation information for a single BPF object */
static int btfgen_record_obj(struct btfgen_info *info, const char *obj_path)
{
const struct btf_ext_info_sec *sec;
const struct bpf_core_relo *relo;
const struct btf_ext_info *seg;
struct hashmap_entry *entry;
struct hashmap *cand_cache = NULL;
struct btf_ext *btf_ext = NULL;
unsigned int relo_idx;
struct btf *btf = NULL;
size_t i;
int err;
btf = btf__parse(obj_path, &btf_ext);
if (!btf) {
err = -errno;
p_err("failed to parse BPF object '%s': %s", obj_path, strerror(errno));
return err;
}
if (!btf_ext) {
p_err("failed to parse BPF object '%s': section %s not found",
obj_path, BTF_EXT_ELF_SEC);
err = -EINVAL;
goto out;
}
if (btf_ext->core_relo_info.len == 0) {
err = 0;
goto out;
}
cand_cache = hashmap__new(btfgen_hash_fn, btfgen_equal_fn, NULL);
if (IS_ERR(cand_cache)) {
err = PTR_ERR(cand_cache);
goto out;
}
seg = &btf_ext->core_relo_info;
for_each_btf_ext_sec(seg, sec) {
for_each_btf_ext_rec(seg, sec, relo_idx, relo) {
struct bpf_core_spec specs_scratch[3] = {};
struct bpf_core_relo_res targ_res = {};
struct bpf_core_cand_list *cands = NULL;
const char *sec_name = btf__name_by_offset(btf, sec->sec_name_off);
if (relo->kind != BPF_CORE_TYPE_ID_LOCAL &&
!hashmap__find(cand_cache, relo->type_id, &cands)) {
cands = btfgen_find_cands(btf, info->src_btf, relo->type_id);
if (!cands) {
err = -errno;
goto out;
}
err = hashmap__set(cand_cache, relo->type_id, cands,
NULL, NULL);
if (err)
goto out;
}
err = bpf_core_calc_relo_insn(sec_name, relo, relo_idx, btf, cands,
specs_scratch, &targ_res);
if (err)
goto out;
/* specs_scratch[2] is the target spec */
err = btfgen_record_reloc(info, &specs_scratch[2]);
if (err)
goto out;
}
}
out:
btf__free(btf);
btf_ext__free(btf_ext);
if (!IS_ERR_OR_NULL(cand_cache)) {
hashmap__for_each_entry(cand_cache, entry, i) {
bpf_core_free_cands(entry->pvalue);
}
hashmap__free(cand_cache);
}
return err;
}
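/* btf_type_visit_type_ids() callback: rewrite an old type ID with the new
 * ID recorded in the mapping built during the first pass of
 * btfgen_get_btf() below.
 */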
static int btfgen_remap_id(__u32 *type_id, void *ctx)
{
unsigned int *ids = ctx;
*type_id = ids[*type_id];
return 0;
}
/* Generate BTF from relocation information previously recorded */
static struct btf *btfgen_get_btf(struct btfgen_info *info)
{
struct btf *btf_new = NULL;
unsigned int *ids = NULL;
unsigned int i, n = btf__type_cnt(info->marked_btf);
int err = 0;
btf_new = btf__new_empty();
if (!btf_new) {
err = -errno;
goto err_out;
}
ids = calloc(n, sizeof(*ids));
if (!ids) {
err = -errno;
goto err_out;
}
/* first pass: add all marked types to btf_new and add their new ids to the ids map */
for (i = 1; i < n; i++) {
const struct btf_type *cloned_type, *type;
const char *name;
int new_id;
cloned_type = btf__type_by_id(info->marked_btf, i);
if (cloned_type->name_off != MARKED)
continue;
type = btf__type_by_id(info->src_btf, i);
/* add members for struct and union */
if (btf_is_composite(type)) {
struct btf_member *cloned_m, *m;
unsigned short vlen;
int idx_src;
name = btf__str_by_offset(info->src_btf, type->name_off);
if (btf_is_struct(type))
err = btf__add_struct(btf_new, name, type->size);
else
err = btf__add_union(btf_new, name, type->size);
if (err < 0)
goto err_out;
new_id = err;
cloned_m = btf_members(cloned_type);
m = btf_members(type);
vlen = btf_vlen(cloned_type);
for (idx_src = 0; idx_src < vlen; idx_src++, cloned_m++, m++) {
/* add only members that are marked as used */
if (cloned_m->name_off != MARKED)
continue;
name = btf__str_by_offset(info->src_btf, m->name_off);
err = btf__add_field(btf_new, name, m->type,
btf_member_bit_offset(cloned_type, idx_src),
btf_member_bitfield_size(cloned_type, idx_src));
if (err < 0)
goto err_out;
}
} else {
err = btf__add_type(btf_new, info->src_btf, type);
if (err < 0)
goto err_out;
new_id = err;
}
/* add ID mapping */
ids[i] = new_id;
}
/* second pass: fix up type ids */
for (i = 1; i < btf__type_cnt(btf_new); i++) {
struct btf_type *btf_type = (struct btf_type *) btf__type_by_id(btf_new, i);
err = btf_type_visit_type_ids(btf_type, btfgen_remap_id, ids);
if (err)
goto err_out;
}
free(ids);
return btf_new;
err_out:
btf__free(btf_new);
free(ids);
errno = -err;
return NULL;
}
/* Create minimized BTF file for a set of BPF objects.
*
* The BTFGen algorithm is divided in two main parts: (1) collect the
* BTF types that are involved in relocations and (2) generate the BTF
* object using the collected types.
*
* In order to collect the types involved in the relocations, we parse
* the BTF and BTF.ext sections of the BPF objects and use
* bpf_core_calc_relo_insn() to get the target specification, this
* indicates how the types and fields are used in a relocation.
*
* Types are recorded in different ways according to the kind of the
* relocation. For field-based relocations only the members that are
* actually used are saved in order to reduce the size of the generated
* BTF file. For type-based relocations empty struct / unions are
* generated and for enum-based relocations the whole type is saved.
*
* The second part of the algorithm generates the BTF object. It creates
* an empty BTF object and fills it with the types recorded in the
* previous step. This function takes care of only adding the structure
* and union members that were marked as used and it also fixes up the
* type IDs on the generated BTF object.
*/
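/* For example (file names are illustrative only):
 *
 *   bpftool gen min_core_btf /sys/kernel/btf/vmlinux vmlinux.min.btf prog.bpf.o
 *
 * writes to vmlinux.min.btf only those vmlinux types that the CO-RE
 * relocations of prog.bpf.o actually reference.
 */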
static int minimize_btf(const char *src_btf, const char *dst_btf, const char *objspaths[])
{
struct btfgen_info *info;
struct btf *btf_new = NULL;
int err, i;
info = btfgen_new_info(src_btf);
if (!info) {
err = -errno;
p_err("failed to allocate info structure: %s", strerror(errno));
goto out;
}
for (i = 0; objspaths[i] != NULL; i++) {
err = btfgen_record_obj(info, objspaths[i]);
if (err) {
p_err("error recording relocations for %s: %s", objspaths[i],
strerror(errno));
goto out;
}
}
btf_new = btfgen_get_btf(info);
if (!btf_new) {
err = -errno;
p_err("error generating BTF: %s", strerror(errno));
goto out;
}
err = btf_save_raw(btf_new, dst_btf);
if (err) {
p_err("error saving btf file: %s", strerror(errno));
goto out;
}
out:
btf__free(btf_new);
btfgen_free_info(info);
return err;
}
static int do_min_core_btf(int argc, char **argv)
{
const char *input, *output, **objs;
int i, err;
if (!REQ_ARGS(3)) {
usage();
return -1;
}
input = GET_ARG();
output = GET_ARG();
objs = (const char **) calloc(argc + 1, sizeof(*objs));
if (!objs) {
p_err("failed to allocate array for object names");
return -ENOMEM;
}
i = 0;
while (argc)
objs[i++] = GET_ARG();
err = minimize_btf(input, output, objs);
free(objs);
return err;
}
static const struct cmd cmds[] = {
{ "object", do_object },
{ "skeleton", do_skeleton },
{ "subskeleton", do_subskeleton },
{ "min_core_btf", do_min_core_btf},
{ "help", do_help },
{ 0 }
};
int do_gen(int argc, char **argv)
{
return cmd_select(cmds, argc, argv, do_help);
}
| linux-master | tools/bpf/bpftool/gen.c |
// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
/* Copyright (c) 2019 Netronome Systems, Inc. */
#include <ctype.h>
#include <errno.h>
#include <fcntl.h>
#include <string.h>
#include <unistd.h>
#include <net/if.h>
#ifdef USE_LIBCAP
#include <sys/capability.h>
#endif
#include <sys/utsname.h>
#include <sys/vfs.h>
#include <linux/filter.h>
#include <linux/limits.h>
#include <bpf/bpf.h>
#include <bpf/libbpf.h>
#include <zlib.h>
#include "main.h"
#ifndef PROC_SUPER_MAGIC
# define PROC_SUPER_MAGIC 0x9fa0
#endif
enum probe_component {
COMPONENT_UNSPEC,
COMPONENT_KERNEL,
COMPONENT_DEVICE,
};
#define BPF_HELPER_MAKE_ENTRY(name) [BPF_FUNC_ ## name] = "bpf_" # name
static const char * const helper_name[] = {
__BPF_FUNC_MAPPER(BPF_HELPER_MAKE_ENTRY)
};
#undef BPF_HELPER_MAKE_ENTRY
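/* Set by the "full" keyword: also probe helpers whose probing emits
 * messages to the kernel log.
 */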
static bool full_mode;
#ifdef USE_LIBCAP
static bool run_as_unprivileged;
#endif
/* Miscellaneous utility functions */
static bool grep(const char *buffer, const char *pattern)
{
return !!strstr(buffer, pattern);
}
static bool check_procfs(void)
{
struct statfs st_fs;
if (statfs("/proc", &st_fs) < 0)
return false;
if ((unsigned long)st_fs.f_type != PROC_SUPER_MAGIC)
return false;
return true;
}
static void uppercase(char *str, size_t len)
{
size_t i;
for (i = 0; i < len && str[i] != '\0'; i++)
str[i] = toupper(str[i]);
}
/* Printing utility functions */
static void
print_bool_feature(const char *feat_name, const char *plain_name,
const char *define_name, bool res, const char *define_prefix)
{
if (json_output)
jsonw_bool_field(json_wtr, feat_name, res);
else if (define_prefix)
printf("#define %s%sHAVE_%s\n", define_prefix,
res ? "" : "NO_", define_name);
else
printf("%s is %savailable\n", plain_name, res ? "" : "NOT ");
}
static void print_kernel_option(const char *name, const char *value,
const char *define_prefix)
{
char *endptr;
int res;
if (json_output) {
if (!value) {
jsonw_null_field(json_wtr, name);
return;
}
errno = 0;
res = strtol(value, &endptr, 0);
if (!errno && *endptr == '\n')
jsonw_int_field(json_wtr, name, res);
else
jsonw_string_field(json_wtr, name, value);
} else if (define_prefix) {
if (value)
printf("#define %s%s %s\n", define_prefix,
name, value);
else
printf("/* %s%s is not set */\n", define_prefix, name);
} else {
if (value)
printf("%s is set to %s\n", name, value);
else
printf("%s is not set\n", name);
}
}
static void
print_start_section(const char *json_title, const char *plain_title,
const char *define_comment, const char *define_prefix)
{
if (json_output) {
jsonw_name(json_wtr, json_title);
jsonw_start_object(json_wtr);
} else if (define_prefix) {
printf("%s\n", define_comment);
} else {
printf("%s\n", plain_title);
}
}
static void print_end_section(void)
{
if (json_output)
jsonw_end_object(json_wtr);
else
printf("\n");
}
/* Probing functions */
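/* Read a network device's PCI vendor ID from sysfs, used to account for
 * vendor-specific error messages when probing offload support.
 */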
static int get_vendor_id(int ifindex)
{
char ifname[IF_NAMESIZE], path[64], buf[8];
ssize_t len;
int fd;
if (!if_indextoname(ifindex, ifname))
return -1;
snprintf(path, sizeof(path), "/sys/class/net/%s/device/vendor", ifname);
fd = open(path, O_RDONLY | O_CLOEXEC);
if (fd < 0)
return -1;
len = read(fd, buf, sizeof(buf));
close(fd);
if (len < 0)
return -1;
if (len >= (ssize_t)sizeof(buf))
return -1;
buf[len] = '\0';
return strtol(buf, NULL, 0);
}
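/* Read a single integer value from a procfs file, or return -1 on failure. */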
static long read_procfs(const char *path)
{
char *endptr, *line = NULL;
size_t len = 0;
FILE *fd;
long res;
fd = fopen(path, "r");
if (!fd)
return -1;
res = getline(&line, &len, fd);
fclose(fd);
if (res < 0)
return -1;
errno = 0;
res = strtol(line, &endptr, 10);
if (errno || *line == '\0' || *endptr != '\n')
res = -1;
free(line);
return res;
}
static void probe_unprivileged_disabled(void)
{
long res;
/* No support for C-style output */
res = read_procfs("/proc/sys/kernel/unprivileged_bpf_disabled");
if (json_output) {
jsonw_int_field(json_wtr, "unprivileged_bpf_disabled", res);
} else {
switch (res) {
case 0:
printf("bpf() syscall for unprivileged users is enabled\n");
break;
case 1:
printf("bpf() syscall restricted to privileged users (without recovery)\n");
break;
case 2:
printf("bpf() syscall restricted to privileged users (admin can change)\n");
break;
case -1:
printf("Unable to retrieve required privileges for bpf() syscall\n");
break;
default:
printf("bpf() syscall restriction has unknown value %ld\n", res);
}
}
}
static void probe_jit_enable(void)
{
long res;
/* No support for C-style output */
res = read_procfs("/proc/sys/net/core/bpf_jit_enable");
if (json_output) {
jsonw_int_field(json_wtr, "bpf_jit_enable", res);
} else {
switch (res) {
case 0:
printf("JIT compiler is disabled\n");
break;
case 1:
printf("JIT compiler is enabled\n");
break;
case 2:
printf("JIT compiler is enabled with debugging traces in kernel logs\n");
break;
case -1:
printf("Unable to retrieve JIT-compiler status\n");
break;
default:
printf("JIT-compiler status has unknown value %ld\n",
res);
}
}
}
static void probe_jit_harden(void)
{
long res;
/* No support for C-style output */
res = read_procfs("/proc/sys/net/core/bpf_jit_harden");
if (json_output) {
jsonw_int_field(json_wtr, "bpf_jit_harden", res);
} else {
switch (res) {
case 0:
printf("JIT compiler hardening is disabled\n");
break;
case 1:
printf("JIT compiler hardening is enabled for unprivileged users\n");
break;
case 2:
printf("JIT compiler hardening is enabled for all users\n");
break;
case -1:
printf("Unable to retrieve JIT hardening status\n");
break;
default:
printf("JIT hardening status has unknown value %ld\n",
res);
}
}
}
static void probe_jit_kallsyms(void)
{
long res;
/* No support for C-style output */
res = read_procfs("/proc/sys/net/core/bpf_jit_kallsyms");
if (json_output) {
jsonw_int_field(json_wtr, "bpf_jit_kallsyms", res);
} else {
switch (res) {
case 0:
printf("JIT compiler kallsyms exports are disabled\n");
break;
case 1:
printf("JIT compiler kallsyms exports are enabled for root\n");
break;
case -1:
printf("Unable to retrieve JIT kallsyms export status\n");
break;
default:
printf("JIT kallsyms exports status has unknown value %ld\n", res);
}
}
}
static void probe_jit_limit(void)
{
long res;
/* No support for C-style output */
res = read_procfs("/proc/sys/net/core/bpf_jit_limit");
if (json_output) {
jsonw_int_field(json_wtr, "bpf_jit_limit", res);
} else {
switch (res) {
case -1:
printf("Unable to retrieve global memory limit for JIT compiler for unprivileged users\n");
break;
default:
printf("Global memory limit for JIT compiler for unprivileged users is %ld bytes\n", res);
}
}
}
static bool read_next_kernel_config_option(gzFile file, char *buf, size_t n,
char **value)
{
char *sep;
while (gzgets(file, buf, n)) {
if (strncmp(buf, "CONFIG_", 7))
continue;
sep = strchr(buf, '=');
if (!sep)
continue;
/* Trim ending '\n' */
buf[strlen(buf) - 1] = '\0';
/* Split on '=' and ensure that a value is present. */
*sep = '\0';
if (!sep[1])
continue;
*value = sep + 1;
return true;
}
return false;
}
static void probe_kernel_image_config(const char *define_prefix)
{
static const struct {
const char * const name;
bool macro_dump;
} options[] = {
/* Enable BPF */
{ "CONFIG_BPF", },
/* Enable bpf() syscall */
{ "CONFIG_BPF_SYSCALL", },
/* Does selected architecture support eBPF JIT compiler */
{ "CONFIG_HAVE_EBPF_JIT", },
/* Compile eBPF JIT compiler */
{ "CONFIG_BPF_JIT", },
/* Avoid compiling eBPF interpreter (use JIT only) */
{ "CONFIG_BPF_JIT_ALWAYS_ON", },
/* Kernel BTF debug information available */
{ "CONFIG_DEBUG_INFO_BTF", },
/* Kernel module BTF debug information available */
{ "CONFIG_DEBUG_INFO_BTF_MODULES", },
/* cgroups */
{ "CONFIG_CGROUPS", },
/* BPF programs attached to cgroups */
{ "CONFIG_CGROUP_BPF", },
/* bpf_get_cgroup_classid() helper */
{ "CONFIG_CGROUP_NET_CLASSID", },
/* bpf_skb_{,ancestor_}cgroup_id() helpers */
{ "CONFIG_SOCK_CGROUP_DATA", },
/* Tracing: attach BPF to kprobes, tracepoints, etc. */
{ "CONFIG_BPF_EVENTS", },
/* Kprobes */
{ "CONFIG_KPROBE_EVENTS", },
/* Uprobes */
{ "CONFIG_UPROBE_EVENTS", },
/* Tracepoints */
{ "CONFIG_TRACING", },
/* Syscall tracepoints */
{ "CONFIG_FTRACE_SYSCALLS", },
/* bpf_override_return() helper support for selected arch */
{ "CONFIG_FUNCTION_ERROR_INJECTION", },
/* bpf_override_return() helper */
{ "CONFIG_BPF_KPROBE_OVERRIDE", },
/* Network */
{ "CONFIG_NET", },
/* AF_XDP sockets */
{ "CONFIG_XDP_SOCKETS", },
/* BPF_PROG_TYPE_LWT_* and related helpers */
{ "CONFIG_LWTUNNEL_BPF", },
/* BPF_PROG_TYPE_SCHED_ACT, TC (traffic control) actions */
{ "CONFIG_NET_ACT_BPF", },
/* BPF_PROG_TYPE_SCHED_CLS, TC filters */
{ "CONFIG_NET_CLS_BPF", },
/* TC clsact qdisc */
{ "CONFIG_NET_CLS_ACT", },
/* Ingress filtering with TC */
{ "CONFIG_NET_SCH_INGRESS", },
/* bpf_skb_get_xfrm_state() helper */
{ "CONFIG_XFRM", },
/* bpf_get_route_realm() helper */
{ "CONFIG_IP_ROUTE_CLASSID", },
/* BPF_PROG_TYPE_LWT_SEG6_LOCAL and related helpers */
{ "CONFIG_IPV6_SEG6_BPF", },
/* BPF_PROG_TYPE_LIRC_MODE2 and related helpers */
{ "CONFIG_BPF_LIRC_MODE2", },
/* BPF stream parser and BPF socket maps */
{ "CONFIG_BPF_STREAM_PARSER", },
/* xt_bpf module for passing BPF programs to netfilter */
{ "CONFIG_NETFILTER_XT_MATCH_BPF", },
/* bpfilter back-end for iptables */
{ "CONFIG_BPFILTER", },
/* bpfilter module with "user mode helper" */
{ "CONFIG_BPFILTER_UMH", },
/* test_bpf module for BPF tests */
{ "CONFIG_TEST_BPF", },
/* Misc configs useful in BPF C programs */
/* jiffies <-> sec conversion for bpf_jiffies64() helper */
{ "CONFIG_HZ", true, }
};
char *values[ARRAY_SIZE(options)] = { };
struct utsname utsn;
char path[PATH_MAX];
gzFile file = NULL;
char buf[4096];
char *value;
size_t i;
if (!uname(&utsn)) {
snprintf(path, sizeof(path), "/boot/config-%s", utsn.release);
/* gzopen also accepts uncompressed files. */
file = gzopen(path, "r");
}
if (!file) {
/* Some distributions build with CONFIG_IKCONFIG=y and put the
* config file at /proc/config.gz.
*/
file = gzopen("/proc/config.gz", "r");
}
if (!file) {
p_info("skipping kernel config, can't open file: %s",
strerror(errno));
goto end_parse;
}
/* Sanity checks */
if (!gzgets(file, buf, sizeof(buf)) ||
!gzgets(file, buf, sizeof(buf))) {
p_info("skipping kernel config, can't read from file: %s",
strerror(errno));
goto end_parse;
}
if (strcmp(buf, "# Automatically generated file; DO NOT EDIT.\n")) {
p_info("skipping kernel config, can't find correct file");
goto end_parse;
}
while (read_next_kernel_config_option(file, buf, sizeof(buf), &value)) {
for (i = 0; i < ARRAY_SIZE(options); i++) {
if ((define_prefix && !options[i].macro_dump) ||
values[i] || strcmp(buf, options[i].name))
continue;
values[i] = strdup(value);
}
}
for (i = 0; i < ARRAY_SIZE(options); i++) {
if (define_prefix && !options[i].macro_dump)
continue;
print_kernel_option(options[i].name, values[i], define_prefix);
free(values[i]);
}
end_parse:
if (file)
gzclose(file);
}
static bool probe_bpf_syscall(const char *define_prefix)
{
bool res;
bpf_prog_load(BPF_PROG_TYPE_UNSPEC, NULL, NULL, NULL, 0, NULL);
res = (errno != ENOSYS);
print_bool_feature("have_bpf_syscall",
"bpf() syscall",
"BPF_SYSCALL",
res, define_prefix);
return res;
}
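/* Try to load a minimal program of the given type for offload to the
 * device designated by ifindex, and report whether the kernel accepted it.
 */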
static bool
probe_prog_load_ifindex(enum bpf_prog_type prog_type,
const struct bpf_insn *insns, size_t insns_cnt,
char *log_buf, size_t log_buf_sz,
__u32 ifindex)
{
LIBBPF_OPTS(bpf_prog_load_opts, opts,
.log_buf = log_buf,
.log_size = log_buf_sz,
.log_level = log_buf ? 1 : 0,
.prog_ifindex = ifindex,
);
int fd;
errno = 0;
fd = bpf_prog_load(prog_type, NULL, "GPL", insns, insns_cnt, &opts);
if (fd >= 0)
close(fd);
return fd >= 0 && errno != EINVAL && errno != EOPNOTSUPP;
}
static bool probe_prog_type_ifindex(enum bpf_prog_type prog_type, __u32 ifindex)
{
/* nfp returns -EINVAL on exit(0) with TC offload */
struct bpf_insn insns[2] = {
BPF_MOV64_IMM(BPF_REG_0, 2),
BPF_EXIT_INSN()
};
return probe_prog_load_ifindex(prog_type, insns, ARRAY_SIZE(insns),
NULL, 0, ifindex);
}
static void
probe_prog_type(enum bpf_prog_type prog_type, const char *prog_type_str,
bool *supported_types, const char *define_prefix, __u32 ifindex)
{
char feat_name[128], plain_desc[128], define_name[128];
const char *plain_comment = "eBPF program_type ";
size_t maxlen;
bool res;
if (ifindex) {
switch (prog_type) {
case BPF_PROG_TYPE_SCHED_CLS:
case BPF_PROG_TYPE_XDP:
break;
default:
return;
}
res = probe_prog_type_ifindex(prog_type, ifindex);
} else {
res = libbpf_probe_bpf_prog_type(prog_type, NULL) > 0;
}
#ifdef USE_LIBCAP
/* Probe may succeed even if program load fails, for unprivileged users
* check that we did not fail because of insufficient permissions
*/
if (run_as_unprivileged && errno == EPERM)
res = false;
#endif
supported_types[prog_type] |= res;
maxlen = sizeof(plain_desc) - strlen(plain_comment) - 1;
if (strlen(prog_type_str) > maxlen) {
p_info("program type name too long");
return;
}
sprintf(feat_name, "have_%s_prog_type", prog_type_str);
sprintf(define_name, "%s_prog_type", prog_type_str);
uppercase(define_name, sizeof(define_name));
sprintf(plain_desc, "%s%s", plain_comment, prog_type_str);
print_bool_feature(feat_name, plain_desc, define_name, res,
define_prefix);
}
static bool probe_map_type_ifindex(enum bpf_map_type map_type, __u32 ifindex)
{
LIBBPF_OPTS(bpf_map_create_opts, opts);
int key_size, value_size, max_entries;
int fd;
opts.map_ifindex = ifindex;
key_size = sizeof(__u32);
value_size = sizeof(__u32);
max_entries = 1;
fd = bpf_map_create(map_type, NULL, key_size, value_size, max_entries,
&opts);
if (fd >= 0)
close(fd);
return fd >= 0;
}
static void
probe_map_type(enum bpf_map_type map_type, char const *map_type_str,
const char *define_prefix, __u32 ifindex)
{
char feat_name[128], plain_desc[128], define_name[128];
const char *plain_comment = "eBPF map_type ";
size_t maxlen;
bool res;
if (ifindex) {
switch (map_type) {
case BPF_MAP_TYPE_HASH:
case BPF_MAP_TYPE_ARRAY:
break;
default:
return;
}
res = probe_map_type_ifindex(map_type, ifindex);
} else {
res = libbpf_probe_bpf_map_type(map_type, NULL) > 0;
}
/* Probe result depends on the success of map creation, no additional
* check required for unprivileged users
*/
maxlen = sizeof(plain_desc) - strlen(plain_comment) - 1;
if (strlen(map_type_str) > maxlen) {
p_info("map type name too long");
return;
}
sprintf(feat_name, "have_%s_map_type", map_type_str);
sprintf(define_name, "%s_map_type", map_type_str);
uppercase(define_name, sizeof(define_name));
sprintf(plain_desc, "%s%s", plain_comment, map_type_str);
print_bool_feature(feat_name, plain_desc, define_name, res,
define_prefix);
}
static bool
probe_helper_ifindex(enum bpf_func_id id, enum bpf_prog_type prog_type,
__u32 ifindex)
{
struct bpf_insn insns[2] = {
BPF_EMIT_CALL(id),
BPF_EXIT_INSN()
};
char buf[4096] = {};
bool res;
probe_prog_load_ifindex(prog_type, insns, ARRAY_SIZE(insns), buf,
sizeof(buf), ifindex);
res = !grep(buf, "invalid func ") && !grep(buf, "unknown func ");
switch (get_vendor_id(ifindex)) {
case 0x19ee: /* Netronome specific */
res = res && !grep(buf, "not supported by FW") &&
!grep(buf, "unsupported function id");
break;
default:
break;
}
return res;
}
static bool
probe_helper_for_progtype(enum bpf_prog_type prog_type, bool supported_type,
const char *define_prefix, unsigned int id,
const char *ptype_name, __u32 ifindex)
{
bool res = false;
if (supported_type) {
if (ifindex)
res = probe_helper_ifindex(id, prog_type, ifindex);
else
res = libbpf_probe_bpf_helper(prog_type, id, NULL) > 0;
#ifdef USE_LIBCAP
/* Probe may succeed even if program load fails, for
* unprivileged users check that we did not fail because of
* insufficient permissions
*/
if (run_as_unprivileged && errno == EPERM)
res = false;
#endif
}
if (json_output) {
if (res)
jsonw_string(json_wtr, helper_name[id]);
} else if (define_prefix) {
printf("#define %sBPF__PROG_TYPE_%s__HELPER_%s %s\n",
define_prefix, ptype_name, helper_name[id],
res ? "1" : "0");
} else {
if (res)
printf("\n\t- %s", helper_name[id]);
}
return res;
}
static void
probe_helpers_for_progtype(enum bpf_prog_type prog_type,
const char *prog_type_str, bool supported_type,
const char *define_prefix, __u32 ifindex)
{
char feat_name[128];
unsigned int id;
bool probe_res = false;
if (ifindex)
/* Only test helpers for offload-able program types */
switch (prog_type) {
case BPF_PROG_TYPE_SCHED_CLS:
case BPF_PROG_TYPE_XDP:
break;
default:
return;
}
if (json_output) {
sprintf(feat_name, "%s_available_helpers", prog_type_str);
jsonw_name(json_wtr, feat_name);
jsonw_start_array(json_wtr);
} else if (!define_prefix) {
printf("eBPF helpers supported for program type %s:",
prog_type_str);
}
for (id = 1; id < ARRAY_SIZE(helper_name); id++) {
/* Skip helper functions which emit dmesg messages when not in
* the full mode.
*/
switch (id) {
case BPF_FUNC_trace_printk:
case BPF_FUNC_trace_vprintk:
case BPF_FUNC_probe_write_user:
if (!full_mode)
continue;
fallthrough;
default:
probe_res |= probe_helper_for_progtype(prog_type, supported_type,
define_prefix, id, prog_type_str,
ifindex);
}
}
if (json_output)
jsonw_end_array(json_wtr);
else if (!define_prefix) {
printf("\n");
if (!probe_res) {
if (!supported_type)
printf("\tProgram type not supported\n");
else
printf("\tCould not determine which helpers are available\n");
}
}
}
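/* Load the given instructions as a socket filter program (optionally for
 * offload) and print whether the probed feature is available.
 */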
static void
probe_misc_feature(struct bpf_insn *insns, size_t len,
const char *define_prefix, __u32 ifindex,
const char *feat_name, const char *plain_name,
const char *define_name)
{
LIBBPF_OPTS(bpf_prog_load_opts, opts,
.prog_ifindex = ifindex,
);
bool res;
int fd;
errno = 0;
fd = bpf_prog_load(BPF_PROG_TYPE_SOCKET_FILTER, NULL, "GPL",
insns, len, &opts);
res = fd >= 0 || !errno;
if (fd >= 0)
close(fd);
print_bool_feature(feat_name, plain_name, define_name, res,
define_prefix);
}
/*
* Probe for availability of kernel commit (5.3):
*
* c04c0d2b968a ("bpf: increase complexity limit and maximum program size")
*/
static void probe_large_insn_limit(const char *define_prefix, __u32 ifindex)
{
struct bpf_insn insns[BPF_MAXINSNS + 1];
int i;
for (i = 0; i < BPF_MAXINSNS; i++)
insns[i] = BPF_MOV64_IMM(BPF_REG_0, 1);
insns[BPF_MAXINSNS] = BPF_EXIT_INSN();
probe_misc_feature(insns, ARRAY_SIZE(insns),
define_prefix, ifindex,
"have_large_insn_limit",
"Large program size limit",
"LARGE_INSN_LIMIT");
}
/*
* Probe for bounded loop support introduced in commit 2589726d12a1
* ("bpf: introduce bounded loops").
*/
static void
probe_bounded_loops(const char *define_prefix, __u32 ifindex)
{
struct bpf_insn insns[4] = {
BPF_MOV64_IMM(BPF_REG_0, 10),
BPF_ALU64_IMM(BPF_SUB, BPF_REG_0, 1),
BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, -2),
BPF_EXIT_INSN()
};
probe_misc_feature(insns, ARRAY_SIZE(insns),
define_prefix, ifindex,
"have_bounded_loops",
"Bounded loop support",
"BOUNDED_LOOPS");
}
/*
* Probe for the v2 instruction set extension introduced in commit 92b31a9af73b
* ("bpf: add BPF_J{LT,LE,SLT,SLE} instructions").
*/
static void
probe_v2_isa_extension(const char *define_prefix, __u32 ifindex)
{
struct bpf_insn insns[4] = {
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_JMP_IMM(BPF_JLT, BPF_REG_0, 0, 1),
BPF_MOV64_IMM(BPF_REG_0, 1),
BPF_EXIT_INSN()
};
probe_misc_feature(insns, ARRAY_SIZE(insns),
define_prefix, ifindex,
"have_v2_isa_extension",
"ISA extension v2",
"V2_ISA_EXTENSION");
}
/*
* Probe for the v3 instruction set extension introduced in commit 092ed0968bb6
* ("bpf: verifier support JMP32").
*/
static void
probe_v3_isa_extension(const char *define_prefix, __u32 ifindex)
{
struct bpf_insn insns[4] = {
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_JMP32_IMM(BPF_JLT, BPF_REG_0, 0, 1),
BPF_MOV64_IMM(BPF_REG_0, 1),
BPF_EXIT_INSN()
};
probe_misc_feature(insns, ARRAY_SIZE(insns),
define_prefix, ifindex,
"have_v3_isa_extension",
"ISA extension v3",
"V3_ISA_EXTENSION");
}
static void
section_system_config(enum probe_component target, const char *define_prefix)
{
switch (target) {
case COMPONENT_KERNEL:
case COMPONENT_UNSPEC:
print_start_section("system_config",
"Scanning system configuration...",
"/*** Misc kernel config items ***/",
define_prefix);
if (!define_prefix) {
if (check_procfs()) {
probe_unprivileged_disabled();
probe_jit_enable();
probe_jit_harden();
probe_jit_kallsyms();
probe_jit_limit();
} else {
p_info("/* procfs not mounted, skipping related probes */");
}
}
probe_kernel_image_config(define_prefix);
print_end_section();
break;
default:
break;
}
}
static bool section_syscall_config(const char *define_prefix)
{
bool res;
print_start_section("syscall_config",
"Scanning system call availability...",
"/*** System call availability ***/",
define_prefix);
res = probe_bpf_syscall(define_prefix);
print_end_section();
return res;
}
static void
section_program_types(bool *supported_types, const char *define_prefix,
__u32 ifindex)
{
unsigned int prog_type = BPF_PROG_TYPE_UNSPEC;
const char *prog_type_str;
print_start_section("program_types",
"Scanning eBPF program types...",
"/*** eBPF program types ***/",
define_prefix);
while (true) {
prog_type++;
prog_type_str = libbpf_bpf_prog_type_str(prog_type);
/* libbpf will return NULL for variants unknown to it. */
if (!prog_type_str)
break;
probe_prog_type(prog_type, prog_type_str, supported_types, define_prefix,
ifindex);
}
print_end_section();
}
static void section_map_types(const char *define_prefix, __u32 ifindex)
{
unsigned int map_type = BPF_MAP_TYPE_UNSPEC;
const char *map_type_str;
print_start_section("map_types",
"Scanning eBPF map types...",
"/*** eBPF map types ***/",
define_prefix);
while (true) {
map_type++;
map_type_str = libbpf_bpf_map_type_str(map_type);
/* libbpf will return NULL for variants unknown to it. */
if (!map_type_str)
break;
probe_map_type(map_type, map_type_str, define_prefix, ifindex);
}
print_end_section();
}
static void
section_helpers(bool *supported_types, const char *define_prefix, __u32 ifindex)
{
unsigned int prog_type = BPF_PROG_TYPE_UNSPEC;
const char *prog_type_str;
print_start_section("helpers",
"Scanning eBPF helper functions...",
"/*** eBPF helper functions ***/",
define_prefix);
if (define_prefix)
printf("/*\n"
" * Use %sHAVE_PROG_TYPE_HELPER(prog_type_name, helper_name)\n"
" * to determine if <helper_name> is available for <prog_type_name>,\n"
" * e.g.\n"
" * #if %sHAVE_PROG_TYPE_HELPER(xdp, bpf_redirect)\n"
" * // do stuff with this helper\n"
" * #elif\n"
" * // use a workaround\n"
" * #endif\n"
" */\n"
"#define %sHAVE_PROG_TYPE_HELPER(prog_type, helper) \\\n"
" %sBPF__PROG_TYPE_ ## prog_type ## __HELPER_ ## helper\n",
define_prefix, define_prefix, define_prefix,
define_prefix);
while (true) {
prog_type++;
prog_type_str = libbpf_bpf_prog_type_str(prog_type);
/* libbpf will return NULL for variants unknown to it. */
if (!prog_type_str)
break;
probe_helpers_for_progtype(prog_type, prog_type_str,
supported_types[prog_type],
define_prefix,
ifindex);
}
print_end_section();
}
static void section_misc(const char *define_prefix, __u32 ifindex)
{
print_start_section("misc",
"Scanning miscellaneous eBPF features...",
"/*** eBPF misc features ***/",
define_prefix);
probe_large_insn_limit(define_prefix, ifindex);
probe_bounded_loops(define_prefix, ifindex);
probe_v2_isa_extension(define_prefix, ifindex);
probe_v3_isa_extension(define_prefix, ifindex);
print_end_section();
}
#ifdef USE_LIBCAP
#define capability(c) { c, false, #c }
#define capability_msg(a, i) a[i].set ? "" : a[i].name, a[i].set ? "" : ", "
#endif
static int handle_perms(void)
{
#ifdef USE_LIBCAP
struct {
cap_value_t cap;
bool set;
char name[14]; /* strlen("CAP_SYS_ADMIN") */
} bpf_caps[] = {
capability(CAP_SYS_ADMIN),
#ifdef CAP_BPF
capability(CAP_BPF),
capability(CAP_NET_ADMIN),
capability(CAP_PERFMON),
#endif
};
cap_value_t cap_list[ARRAY_SIZE(bpf_caps)];
unsigned int i, nb_bpf_caps = 0;
bool cap_sys_admin_only = true;
cap_flag_value_t val;
int res = -1;
cap_t caps;
caps = cap_get_proc();
if (!caps) {
p_err("failed to get capabilities for process: %s",
strerror(errno));
return -1;
}
#ifdef CAP_BPF
if (CAP_IS_SUPPORTED(CAP_BPF))
cap_sys_admin_only = false;
#endif
for (i = 0; i < ARRAY_SIZE(bpf_caps); i++) {
const char *cap_name = bpf_caps[i].name;
cap_value_t cap = bpf_caps[i].cap;
if (cap_get_flag(caps, cap, CAP_EFFECTIVE, &val)) {
p_err("bug: failed to retrieve %s status: %s", cap_name,
strerror(errno));
goto exit_free;
}
if (val == CAP_SET) {
bpf_caps[i].set = true;
cap_list[nb_bpf_caps++] = cap;
}
if (cap_sys_admin_only)
/* System does not know about CAP_BPF, meaning that
* CAP_SYS_ADMIN is the only capability required. We
* just checked it, break.
*/
break;
}
if ((run_as_unprivileged && !nb_bpf_caps) ||
(!run_as_unprivileged && nb_bpf_caps == ARRAY_SIZE(bpf_caps)) ||
(!run_as_unprivileged && cap_sys_admin_only && nb_bpf_caps)) {
/* We are all good, exit now */
res = 0;
goto exit_free;
}
if (!run_as_unprivileged) {
if (cap_sys_admin_only)
p_err("missing %s, required for full feature probing; run as root or use 'unprivileged'",
bpf_caps[0].name);
else
p_err("missing %s%s%s%s%s%s%s%srequired for full feature probing; run as root or use 'unprivileged'",
capability_msg(bpf_caps, 0),
#ifdef CAP_BPF
capability_msg(bpf_caps, 1),
capability_msg(bpf_caps, 2),
capability_msg(bpf_caps, 3)
#else
"", "", "", "", "", ""
#endif /* CAP_BPF */
);
goto exit_free;
}
/* if (run_as_unprivileged && nb_bpf_caps > 0), drop capabilities. */
if (cap_set_flag(caps, CAP_EFFECTIVE, nb_bpf_caps, cap_list,
CAP_CLEAR)) {
p_err("bug: failed to clear capabilities: %s", strerror(errno));
goto exit_free;
}
if (cap_set_proc(caps)) {
p_err("failed to drop capabilities: %s", strerror(errno));
goto exit_free;
}
res = 0;
exit_free:
if (cap_free(caps) && !res) {
p_err("failed to clear storage object for capabilities: %s",
strerror(errno));
res = -1;
}
return res;
#else
/* Detection assumes user has specific privileges.
* We do not use libcap so let's approximate, and restrict usage to
* root user only.
*/
if (geteuid()) {
p_err("full feature probing requires root privileges");
return -1;
}
return 0;
#endif /* USE_LIBCAP */
}
static int do_probe(int argc, char **argv)
{
enum probe_component target = COMPONENT_UNSPEC;
const char *define_prefix = NULL;
bool supported_types[128] = {};
__u32 ifindex = 0;
char *ifname;
set_max_rlimit();
while (argc) {
if (is_prefix(*argv, "kernel")) {
if (target != COMPONENT_UNSPEC) {
p_err("component to probe already specified");
return -1;
}
target = COMPONENT_KERNEL;
NEXT_ARG();
} else if (is_prefix(*argv, "dev")) {
NEXT_ARG();
if (target != COMPONENT_UNSPEC || ifindex) {
p_err("component to probe already specified");
return -1;
}
if (!REQ_ARGS(1))
return -1;
target = COMPONENT_DEVICE;
ifname = GET_ARG();
ifindex = if_nametoindex(ifname);
if (!ifindex) {
p_err("unrecognized netdevice '%s': %s", ifname,
strerror(errno));
return -1;
}
} else if (is_prefix(*argv, "full")) {
full_mode = true;
NEXT_ARG();
} else if (is_prefix(*argv, "macros") && !define_prefix) {
define_prefix = "";
NEXT_ARG();
} else if (is_prefix(*argv, "prefix")) {
if (!define_prefix) {
p_err("'prefix' argument can only be use after 'macros'");
return -1;
}
if (strcmp(define_prefix, "")) {
p_err("'prefix' already defined");
return -1;
}
NEXT_ARG();
if (!REQ_ARGS(1))
return -1;
define_prefix = GET_ARG();
} else if (is_prefix(*argv, "unprivileged")) {
#ifdef USE_LIBCAP
run_as_unprivileged = true;
NEXT_ARG();
#else
p_err("unprivileged run not supported, recompile bpftool with libcap");
return -1;
#endif
} else {
p_err("expected no more arguments, 'kernel', 'dev', 'macros' or 'prefix', got: '%s'?",
*argv);
return -1;
}
}
/* Full feature detection requires specific privileges.
* Let's approximate, and warn if user is not root.
*/
if (handle_perms())
return -1;
if (json_output) {
define_prefix = NULL;
jsonw_start_object(json_wtr);
}
section_system_config(target, define_prefix);
if (!section_syscall_config(define_prefix))
/* bpf() syscall unavailable, don't probe other BPF features */
goto exit_close_json;
section_program_types(supported_types, define_prefix, ifindex);
section_map_types(define_prefix, ifindex);
section_helpers(supported_types, define_prefix, ifindex);
section_misc(define_prefix, ifindex);
exit_close_json:
if (json_output)
/* End root object */
jsonw_end_object(json_wtr);
return 0;
}
static const char *get_helper_name(unsigned int id)
{
if (id >= ARRAY_SIZE(helper_name))
return NULL;
return helper_name[id];
}
static int do_list_builtins(int argc, char **argv)
{
const char *(*get_name)(unsigned int id);
unsigned int id = 0;
if (argc < 1)
usage();
if (is_prefix(*argv, "prog_types")) {
get_name = (const char *(*)(unsigned int))libbpf_bpf_prog_type_str;
} else if (is_prefix(*argv, "map_types")) {
get_name = (const char *(*)(unsigned int))libbpf_bpf_map_type_str;
} else if (is_prefix(*argv, "attach_types")) {
get_name = (const char *(*)(unsigned int))libbpf_bpf_attach_type_str;
} else if (is_prefix(*argv, "link_types")) {
get_name = (const char *(*)(unsigned int))libbpf_bpf_link_type_str;
} else if (is_prefix(*argv, "helpers")) {
get_name = get_helper_name;
} else {
p_err("expected 'prog_types', 'map_types', 'attach_types', 'link_types' or 'helpers', got: %s", *argv);
return -1;
}
if (json_output)
jsonw_start_array(json_wtr); /* root array */
while (true) {
const char *name;
name = get_name(id++);
if (!name)
break;
if (json_output)
jsonw_string(json_wtr, name);
else
printf("%s\n", name);
}
if (json_output)
jsonw_end_array(json_wtr); /* root array */
return 0;
}
static int do_help(int argc, char **argv)
{
if (json_output) {
jsonw_null(json_wtr);
return 0;
}
fprintf(stderr,
"Usage: %1$s %2$s probe [COMPONENT] [full] [unprivileged] [macros [prefix PREFIX]]\n"
" %1$s %2$s list_builtins GROUP\n"
" %1$s %2$s help\n"
"\n"
" COMPONENT := { kernel | dev NAME }\n"
" GROUP := { prog_types | map_types | attach_types | link_types | helpers }\n"
" " HELP_SPEC_OPTIONS " }\n"
"",
bin_name, argv[-2]);
return 0;
}
static const struct cmd cmds[] = {
{ "probe", do_probe },
{ "list_builtins", do_list_builtins },
{ "help", do_help },
{ 0 }
};
int do_feature(int argc, char **argv)
{
return cmd_select(cmds, argc, argv, do_help);
}
| linux-master | tools/bpf/bpftool/feature.c |
// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
/* Copyright (c) 2015-2017 Daniel Borkmann */
/* Copyright (c) 2018 Netronome Systems, Inc. */
#include <errno.h>
#include <limits.h>
#include <signal.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <linux/magic.h>
#include <fcntl.h>
#include <sys/vfs.h>
#include "main.h"
#ifndef TRACEFS_MAGIC
# define TRACEFS_MAGIC 0x74726163
#endif
#define _textify(x) #x
#define textify(x) _textify(x)
FILE *trace_pipe_fd;
char *buff;
static int validate_tracefs_mnt(const char *mnt, unsigned long magic)
{
struct statfs st_fs;
if (statfs(mnt, &st_fs) < 0)
return -ENOENT;
if ((unsigned long)st_fs.f_type != magic)
return -ENOENT;
return 0;
}
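/* Copy mntpt into mnt if it is a valid tracefs mount point. */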
static bool
find_tracefs_mnt_single(unsigned long magic, char *mnt, const char *mntpt)
{
size_t src_len;
if (validate_tracefs_mnt(mntpt, magic))
return false;
src_len = strlen(mntpt);
if (src_len + 1 >= PATH_MAX) {
p_err("tracefs mount point name too long");
return false;
}
strcpy(mnt, mntpt);
return true;
}
static bool get_tracefs_pipe(char *mnt)
{
static const char * const known_mnts[] = {
"/sys/kernel/debug/tracing",
"/sys/kernel/tracing",
"/tracing",
"/trace",
};
const char *pipe_name = "/trace_pipe";
const char *fstype = "tracefs";
char type[100], format[32];
const char * const *ptr;
bool found = false;
FILE *fp;
for (ptr = known_mnts; ptr < known_mnts + ARRAY_SIZE(known_mnts); ptr++)
if (find_tracefs_mnt_single(TRACEFS_MAGIC, mnt, *ptr))
goto exit_found;
fp = fopen("/proc/mounts", "r");
if (!fp)
return false;
/* Allow room for NULL terminating byte and pipe file name */
snprintf(format, sizeof(format), "%%*s %%%zds %%99s %%*s %%*d %%*d\\n",
PATH_MAX - strlen(pipe_name) - 1);
while (fscanf(fp, format, mnt, type) == 2)
if (strcmp(type, fstype) == 0) {
found = true;
break;
}
fclose(fp);
/* The string from fscanf() might be truncated, check mnt is valid */
if (found && validate_tracefs_mnt(mnt, TRACEFS_MAGIC))
goto exit_found;
if (block_mount)
return false;
p_info("could not find tracefs, attempting to mount it now");
/* Most of the time, tracefs is automatically mounted by debugfs at
* /sys/kernel/debug/tracing when we try to access it. If we could not
* find it, it is likely that debugfs is not mounted. Let's give one
* attempt at mounting just tracefs at /sys/kernel/tracing.
*/
strcpy(mnt, known_mnts[1]);
if (mount_tracefs(mnt))
return false;
exit_found:
strcat(mnt, pipe_name);
return true;
}
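/* Signal handler: close the trace pipe, release the line buffer and
 * terminate any pending JSON output before exiting.
 */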
static void exit_tracelog(int signum)
{
fclose(trace_pipe_fd);
free(buff);
if (json_output) {
jsonw_end_array(json_wtr);
jsonw_destroy(&json_wtr);
}
exit(0);
}
int do_tracelog(int argc, char **argv)
{
const struct sigaction act = {
.sa_handler = exit_tracelog
};
char trace_pipe[PATH_MAX];
size_t buff_len = 0;
if (json_output)
jsonw_start_array(json_wtr);
if (!get_tracefs_pipe(trace_pipe))
return -1;
trace_pipe_fd = fopen(trace_pipe, "r");
if (!trace_pipe_fd) {
p_err("could not open trace pipe: %s", strerror(errno));
return -1;
}
sigaction(SIGHUP, &act, NULL);
sigaction(SIGINT, &act, NULL);
sigaction(SIGTERM, &act, NULL);
while (1) {
ssize_t ret;
ret = getline(&buff, &buff_len, trace_pipe_fd);
if (ret <= 0) {
p_err("failed to read content from trace pipe: %s",
strerror(errno));
break;
}
if (json_output)
jsonw_string(json_wtr, buff);
else
printf("%s", buff);
}
fclose(trace_pipe_fd);
free(buff);
return -1;
}
| linux-master | tools/bpf/bpftool/tracelog.c |
// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
/* Copyright (C) 2018 Netronome Systems, Inc. */
#ifndef _GNU_SOURCE
#define _GNU_SOURCE
#endif
#include <stdarg.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/types.h>
#include <bpf/libbpf.h>
#include <bpf/libbpf_internal.h>
#include "disasm.h"
#include "json_writer.h"
#include "main.h"
#include "xlated_dumper.h"
static int kernel_syms_cmp(const void *sym_a, const void *sym_b)
{
unsigned long addr_a = ((const struct kernel_sym *)sym_a)->address;
unsigned long addr_b = ((const struct kernel_sym *)sym_b)->address;
/* compare explicitly: the unsigned long difference may not fit in an int */
return addr_a < addr_b ? -1 : addr_a > addr_b;
}
void kernel_syms_load(struct dump_data *dd)
{
struct kernel_sym *sym;
char buff[256];
void *tmp, *address;
FILE *fp;
fp = fopen("/proc/kallsyms", "r");
if (!fp)
return;
while (fgets(buff, sizeof(buff), fp)) {
tmp = libbpf_reallocarray(dd->sym_mapping, dd->sym_count + 1,
sizeof(*dd->sym_mapping));
if (!tmp) {
out:
free(dd->sym_mapping);
dd->sym_mapping = NULL;
fclose(fp);
return;
}
dd->sym_mapping = tmp;
sym = &dd->sym_mapping[dd->sym_count];
/* module is optional */
sym->module[0] = '\0';
/* trim the square brackets around the module name */
if (sscanf(buff, "%p %*c %s [%[^]]", &address, sym->name, sym->module) < 2)
continue;
sym->address = (unsigned long)address;
if (!strcmp(sym->name, "__bpf_call_base")) {
dd->address_call_base = sym->address;
/* sysctl kernel.kptr_restrict was set */
if (!sym->address)
goto out;
}
if (sym->address)
dd->sym_count++;
}
fclose(fp);
qsort(dd->sym_mapping, dd->sym_count,
sizeof(*dd->sym_mapping), kernel_syms_cmp);
}
void kernel_syms_destroy(struct dump_data *dd)
{
free(dd->sym_mapping);
}
struct kernel_sym *kernel_syms_search(struct dump_data *dd,
unsigned long key)
{
struct kernel_sym sym = {
.address = key,
};
return dd->sym_mapping ?
bsearch(&sym, dd->sym_mapping, dd->sym_count,
sizeof(*dd->sym_mapping), kernel_syms_cmp) : NULL;
}
static void __printf(2, 3) print_insn(void *private_data, const char *fmt, ...)
{
va_list args;
va_start(args, fmt);
vprintf(fmt, args);
va_end(args);
}
static void __printf(2, 3)
print_insn_for_graph(void *private_data, const char *fmt, ...)
{
char buf[64], *p;
va_list args;
va_start(args, fmt);
vsnprintf(buf, sizeof(buf), fmt, args);
va_end(args);
p = buf;
while (*p != '\0') {
if (*p == '\n') {
memmove(p + 3, p, strlen(buf) + 1 - (p - buf));
/* Align each instruction dump row left. */
*p++ = '\\';
*p++ = 'l';
/* Output multiline concatenation. */
*p++ = '\\';
} else if (*p == '<' || *p == '>' || *p == '|' || *p == '&') {
memmove(p + 1, p, strlen(buf) + 1 - (p - buf));
/* Escape special character. */
*p++ = '\\';
}
p++;
}
printf("%s", buf);
}
static void __printf(2, 3)
print_insn_json(void *private_data, const char *fmt, ...)
{
unsigned int l = strlen(fmt);
/* avoid a zero-length VLA and an uninitialized buffer when fmt is empty */
char chomped_fmt[l > 0 ? l : 1];
va_list args;
va_start(args, fmt);
if (l > 0) {
strncpy(chomped_fmt, fmt, l - 1);
chomped_fmt[l - 1] = '\0';
} else {
chomped_fmt[0] = '\0';
}
jsonw_vprintf_enquote(json_wtr, chomped_fmt, args);
va_end(args);
}
static const char *print_call_pcrel(struct dump_data *dd,
struct kernel_sym *sym,
unsigned long address,
const struct bpf_insn *insn)
{
if (!dd->nr_jited_ksyms)
/* Do not show address for interpreted programs */
snprintf(dd->scratch_buff, sizeof(dd->scratch_buff),
"%+d", insn->off);
else if (sym)
snprintf(dd->scratch_buff, sizeof(dd->scratch_buff),
"%+d#%s", insn->off, sym->name);
else
snprintf(dd->scratch_buff, sizeof(dd->scratch_buff),
"%+d#0x%lx", insn->off, address);
return dd->scratch_buff;
}
static const char *print_call_helper(struct dump_data *dd,
struct kernel_sym *sym,
unsigned long address)
{
if (sym)
snprintf(dd->scratch_buff, sizeof(dd->scratch_buff),
"%s", sym->name);
else
snprintf(dd->scratch_buff, sizeof(dd->scratch_buff),
"0x%lx", address);
return dd->scratch_buff;
}
static const char *print_call(void *private_data,
const struct bpf_insn *insn)
{
struct dump_data *dd = private_data;
unsigned long address = dd->address_call_base + insn->imm;
struct kernel_sym *sym;
if (insn->src_reg == BPF_PSEUDO_CALL &&
(__u32) insn->imm < dd->nr_jited_ksyms && dd->jited_ksyms)
address = dd->jited_ksyms[insn->imm];
sym = kernel_syms_search(dd, address);
if (insn->src_reg == BPF_PSEUDO_CALL)
return print_call_pcrel(dd, sym, address, insn);
else
return print_call_helper(dd, sym, address);
}
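/* Render the immediate of a ld_imm64 instruction, decoding the pseudo
 * source register variants (map FD, map value, map index, subprog).
 */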
static const char *print_imm(void *private_data,
const struct bpf_insn *insn,
__u64 full_imm)
{
struct dump_data *dd = private_data;
if (insn->src_reg == BPF_PSEUDO_MAP_FD)
snprintf(dd->scratch_buff, sizeof(dd->scratch_buff),
"map[id:%u]", insn->imm);
else if (insn->src_reg == BPF_PSEUDO_MAP_VALUE)
snprintf(dd->scratch_buff, sizeof(dd->scratch_buff),
"map[id:%u][0]+%u", insn->imm, (insn + 1)->imm);
else if (insn->src_reg == BPF_PSEUDO_MAP_IDX_VALUE)
snprintf(dd->scratch_buff, sizeof(dd->scratch_buff),
"map[idx:%u]+%u", insn->imm, (insn + 1)->imm);
else if (insn->src_reg == BPF_PSEUDO_FUNC)
snprintf(dd->scratch_buff, sizeof(dd->scratch_buff),
"subprog[%+d]", insn->imm);
else
snprintf(dd->scratch_buff, sizeof(dd->scratch_buff),
"0x%llx", (unsigned long long)full_imm);
return dd->scratch_buff;
}
void dump_xlated_json(struct dump_data *dd, void *buf, unsigned int len,
bool opcodes, bool linum)
{
const struct bpf_prog_linfo *prog_linfo = dd->prog_linfo;
const struct bpf_insn_cbs cbs = {
.cb_print = print_insn_json,
.cb_call = print_call,
.cb_imm = print_imm,
.private_data = dd,
};
struct bpf_func_info *record;
struct bpf_insn *insn = buf;
struct btf *btf = dd->btf;
bool double_insn = false;
unsigned int nr_skip = 0;
char func_sig[1024];
unsigned int i;
jsonw_start_array(json_wtr);
record = dd->func_info;
for (i = 0; i < len / sizeof(*insn); i++) {
if (double_insn) {
double_insn = false;
continue;
}
double_insn = insn[i].code == (BPF_LD | BPF_IMM | BPF_DW);
jsonw_start_object(json_wtr);
if (btf && record) {
if (record->insn_off == i) {
btf_dumper_type_only(btf, record->type_id,
func_sig,
sizeof(func_sig));
if (func_sig[0] != '\0') {
jsonw_name(json_wtr, "proto");
jsonw_string(json_wtr, func_sig);
}
record = (void *)record + dd->finfo_rec_size;
}
}
if (prog_linfo) {
const struct bpf_line_info *linfo;
linfo = bpf_prog_linfo__lfind(prog_linfo, i, nr_skip);
if (linfo) {
btf_dump_linfo_json(btf, linfo, linum);
nr_skip++;
}
}
jsonw_name(json_wtr, "disasm");
print_bpf_insn(&cbs, insn + i, true);
if (opcodes) {
jsonw_name(json_wtr, "opcodes");
jsonw_start_object(json_wtr);
jsonw_name(json_wtr, "code");
jsonw_printf(json_wtr, "\"0x%02hhx\"", insn[i].code);
jsonw_name(json_wtr, "src_reg");
jsonw_printf(json_wtr, "\"0x%hhx\"", insn[i].src_reg);
jsonw_name(json_wtr, "dst_reg");
jsonw_printf(json_wtr, "\"0x%hhx\"", insn[i].dst_reg);
jsonw_name(json_wtr, "off");
print_hex_data_json((uint8_t *)(&insn[i].off), 2);
jsonw_name(json_wtr, "imm");
if (double_insn && i < len - 1)
print_hex_data_json((uint8_t *)(&insn[i].imm),
12);
else
print_hex_data_json((uint8_t *)(&insn[i].imm),
4);
jsonw_end_object(json_wtr);
}
jsonw_end_object(json_wtr);
}
jsonw_end_array(json_wtr);
}
void dump_xlated_plain(struct dump_data *dd, void *buf, unsigned int len,
bool opcodes, bool linum)
{
const struct bpf_prog_linfo *prog_linfo = dd->prog_linfo;
const struct bpf_insn_cbs cbs = {
.cb_print = print_insn,
.cb_call = print_call,
.cb_imm = print_imm,
.private_data = dd,
};
struct bpf_func_info *record;
struct bpf_insn *insn = buf;
struct btf *btf = dd->btf;
unsigned int nr_skip = 0;
bool double_insn = false;
char func_sig[1024];
unsigned int i;
record = dd->func_info;
for (i = 0; i < len / sizeof(*insn); i++) {
if (double_insn) {
double_insn = false;
continue;
}
if (btf && record) {
if (record->insn_off == i) {
btf_dumper_type_only(btf, record->type_id,
func_sig,
sizeof(func_sig));
if (func_sig[0] != '\0')
printf("%s:\n", func_sig);
record = (void *)record + dd->finfo_rec_size;
}
}
if (prog_linfo) {
const struct bpf_line_info *linfo;
linfo = bpf_prog_linfo__lfind(prog_linfo, i, nr_skip);
if (linfo) {
btf_dump_linfo_plain(btf, linfo, "; ",
linum);
nr_skip++;
}
}
double_insn = insn[i].code == (BPF_LD | BPF_IMM | BPF_DW);
printf("% 4d: ", i);
print_bpf_insn(&cbs, insn + i, true);
if (opcodes) {
printf(" ");
fprint_hex(stdout, insn + i, 8, " ");
if (double_insn && i < len - 1) {
printf(" ");
fprint_hex(stdout, insn + i + 1, 8, " ");
}
printf("\n");
}
}
}
void dump_xlated_for_graph(struct dump_data *dd, void *buf_start, void *buf_end,
unsigned int start_idx,
bool opcodes, bool linum)
{
const struct bpf_insn_cbs cbs = {
.cb_print = print_insn_for_graph,
.cb_call = print_call,
.cb_imm = print_imm,
.private_data = dd,
};
const struct bpf_prog_linfo *prog_linfo = dd->prog_linfo;
const struct bpf_line_info *last_linfo = NULL;
struct bpf_func_info *record = dd->func_info;
struct bpf_insn *insn_start = buf_start;
struct bpf_insn *insn_end = buf_end;
struct bpf_insn *cur = insn_start;
struct btf *btf = dd->btf;
bool double_insn = false;
char func_sig[1024];
for (; cur <= insn_end; cur++) {
unsigned int insn_off;
if (double_insn) {
double_insn = false;
continue;
}
double_insn = cur->code == (BPF_LD | BPF_IMM | BPF_DW);
insn_off = (unsigned int)(cur - insn_start + start_idx);
if (btf && record) {
if (record->insn_off == insn_off) {
btf_dumper_type_only(btf, record->type_id,
func_sig,
sizeof(func_sig));
if (func_sig[0] != '\0')
printf("; %s:\\l\\\n", func_sig);
record = (void *)record + dd->finfo_rec_size;
}
}
if (prog_linfo) {
const struct bpf_line_info *linfo;
linfo = bpf_prog_linfo__lfind(prog_linfo, insn_off, 0);
if (linfo && linfo != last_linfo) {
btf_dump_linfo_dotlabel(btf, linfo, linum);
last_linfo = linfo;
}
}
printf("%d: ", insn_off);
print_bpf_insn(&cbs, cur, true);
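		/* The doubled backslashes below emit literal "\l" sequences,
		 * which left-justify and break lines inside a graphviz
		 * record label.
		 */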
if (opcodes) {
printf("\\ \\ \\ \\ ");
fprint_hex(stdout, cur, 8, " ");
if (double_insn && cur <= insn_end - 1) {
printf(" ");
fprint_hex(stdout, cur + 1, 8, " ");
}
printf("\\l\\\n");
}
if (cur != insn_end)
printf("| ");
}
}
| linux-master | tools/bpf/bpftool/xlated_dumper.c |
// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
// Copyright (C) 2017 Facebook
// Author: Roman Gushchin <[email protected]>
#define _XOPEN_SOURCE 500
#include <errno.h>
#include <fcntl.h>
#include <ftw.h>
#include <mntent.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <unistd.h>
#include <bpf/bpf.h>
#include <bpf/btf.h>
#include "main.h"
#define HELP_SPEC_ATTACH_FLAGS \
"ATTACH_FLAGS := { multi | override }"
#define HELP_SPEC_ATTACH_TYPES \
" ATTACH_TYPE := { cgroup_inet_ingress | cgroup_inet_egress |\n" \
" cgroup_inet_sock_create | cgroup_sock_ops |\n" \
" cgroup_device | cgroup_inet4_bind |\n" \
" cgroup_inet6_bind | cgroup_inet4_post_bind |\n" \
" cgroup_inet6_post_bind | cgroup_inet4_connect |\n" \
" cgroup_inet6_connect | cgroup_inet4_getpeername |\n" \
" cgroup_inet6_getpeername | cgroup_inet4_getsockname |\n" \
" cgroup_inet6_getsockname | cgroup_udp4_sendmsg |\n" \
" cgroup_udp6_sendmsg | cgroup_udp4_recvmsg |\n" \
" cgroup_udp6_recvmsg | cgroup_sysctl |\n" \
" cgroup_getsockopt | cgroup_setsockopt |\n" \
" cgroup_inet_sock_release }"
static unsigned int query_flags;
static struct btf *btf_vmlinux;
static __u32 btf_vmlinux_id;
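/* Map an attach type string to its enum value. Canonical libbpf type
 * names must match exactly; the legacy bpftool spellings may be
 * shortened to any unambiguous prefix.
 */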
static enum bpf_attach_type parse_attach_type(const char *str)
{
const char *attach_type_str;
enum bpf_attach_type type;
for (type = 0; ; type++) {
attach_type_str = libbpf_bpf_attach_type_str(type);
if (!attach_type_str)
break;
if (!strcmp(str, attach_type_str))
return type;
}
/* Also check traditionally used attach type strings. For these we keep
* allowing prefixed usage.
*/
for (type = 0; ; type++) {
attach_type_str = bpf_attach_type_input_str(type);
if (!attach_type_str)
break;
if (is_prefix(str, attach_type_str))
return type;
}
return __MAX_BPF_ATTACH_TYPE;
}
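/* Heuristic: look up the BTF object behind attach_btf_obj_id and, if it
 * is the kernel's own "vmlinux" BTF, cache its id so attach_btf_id
 * values can later be resolved against it.
 */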
static void guess_vmlinux_btf_id(__u32 attach_btf_obj_id)
{
struct bpf_btf_info btf_info = {};
__u32 btf_len = sizeof(btf_info);
char name[16] = {};
int err;
int fd;
btf_info.name = ptr_to_u64(name);
btf_info.name_len = sizeof(name);
fd = bpf_btf_get_fd_by_id(attach_btf_obj_id);
if (fd < 0)
return;
err = bpf_btf_get_info_by_fd(fd, &btf_info, &btf_len);
if (err)
goto out;
if (btf_info.kernel_btf && strncmp(name, "vmlinux", sizeof(name)) == 0)
btf_vmlinux_id = btf_info.id;
out:
close(fd);
}
static int show_bpf_prog(int id, enum bpf_attach_type attach_type,
const char *attach_flags_str,
int level)
{
char prog_name[MAX_PROG_FULL_NAME];
const char *attach_btf_name = NULL;
struct bpf_prog_info info = {};
const char *attach_type_str;
__u32 info_len = sizeof(info);
int prog_fd;
prog_fd = bpf_prog_get_fd_by_id(id);
if (prog_fd < 0)
return -1;
if (bpf_prog_get_info_by_fd(prog_fd, &info, &info_len)) {
close(prog_fd);
return -1;
}
attach_type_str = libbpf_bpf_attach_type_str(attach_type);
if (btf_vmlinux) {
if (!btf_vmlinux_id)
guess_vmlinux_btf_id(info.attach_btf_obj_id);
if (btf_vmlinux_id == info.attach_btf_obj_id &&
info.attach_btf_id < btf__type_cnt(btf_vmlinux)) {
const struct btf_type *t =
btf__type_by_id(btf_vmlinux, info.attach_btf_id);
attach_btf_name =
btf__name_by_offset(btf_vmlinux, t->name_off);
}
}
get_prog_full_name(&info, prog_fd, prog_name, sizeof(prog_name));
if (json_output) {
jsonw_start_object(json_wtr);
jsonw_uint_field(json_wtr, "id", info.id);
if (attach_type_str)
jsonw_string_field(json_wtr, "attach_type", attach_type_str);
else
jsonw_uint_field(json_wtr, "attach_type", attach_type);
if (!(query_flags & BPF_F_QUERY_EFFECTIVE))
jsonw_string_field(json_wtr, "attach_flags", attach_flags_str);
jsonw_string_field(json_wtr, "name", prog_name);
if (attach_btf_name)
jsonw_string_field(json_wtr, "attach_btf_name", attach_btf_name);
jsonw_uint_field(json_wtr, "attach_btf_obj_id", info.attach_btf_obj_id);
jsonw_uint_field(json_wtr, "attach_btf_id", info.attach_btf_id);
jsonw_end_object(json_wtr);
} else {
printf("%s%-8u ", level ? " " : "", info.id);
if (attach_type_str)
printf("%-15s", attach_type_str);
else
printf("type %-10u", attach_type);
if (query_flags & BPF_F_QUERY_EFFECTIVE)
printf(" %-15s", prog_name);
else
printf(" %-15s %-15s", attach_flags_str, prog_name);
if (attach_btf_name)
printf(" %-15s", attach_btf_name);
else if (info.attach_btf_id)
printf(" attach_btf_obj_id=%d attach_btf_id=%d",
info.attach_btf_obj_id, info.attach_btf_id);
printf("\n");
}
close(prog_fd);
return 0;
}
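/* Passing NULL buffers to bpf_prog_query() makes the kernel report only
 * the number of attached programs in prog_cnt.
 */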
static int count_attached_bpf_progs(int cgroup_fd, enum bpf_attach_type type)
{
__u32 prog_cnt = 0;
int ret;
ret = bpf_prog_query(cgroup_fd, type, query_flags, NULL,
NULL, &prog_cnt);
if (ret)
return -1;
return prog_cnt;
}
static int cgroup_has_attached_progs(int cgroup_fd)
{
enum bpf_attach_type type;
bool no_prog = true;
for (type = 0; type < __MAX_BPF_ATTACH_TYPE; type++) {
int count = count_attached_bpf_progs(cgroup_fd, type);
if (count < 0 && errno != EINVAL)
return -1;
if (count > 0) {
no_prog = false;
break;
}
}
return no_prog ? 0 : 1;
}
static int show_effective_bpf_progs(int cgroup_fd, enum bpf_attach_type type,
int level)
{
LIBBPF_OPTS(bpf_prog_query_opts, p);
__u32 prog_ids[1024] = {0};
__u32 iter;
int ret;
p.query_flags = query_flags;
p.prog_cnt = ARRAY_SIZE(prog_ids);
p.prog_ids = prog_ids;
ret = bpf_prog_query_opts(cgroup_fd, type, &p);
if (ret)
return ret;
if (p.prog_cnt == 0)
return 0;
for (iter = 0; iter < p.prog_cnt; iter++)
show_bpf_prog(prog_ids[iter], type, NULL, level);
return 0;
}
static int show_attached_bpf_progs(int cgroup_fd, enum bpf_attach_type type,
int level)
{
LIBBPF_OPTS(bpf_prog_query_opts, p);
__u32 prog_attach_flags[1024] = {0};
const char *attach_flags_str;
__u32 prog_ids[1024] = {0};
char buf[32];
__u32 iter;
int ret;
p.query_flags = query_flags;
p.prog_cnt = ARRAY_SIZE(prog_ids);
p.prog_ids = prog_ids;
p.prog_attach_flags = prog_attach_flags;
ret = bpf_prog_query_opts(cgroup_fd, type, &p);
if (ret)
return ret;
if (p.prog_cnt == 0)
return 0;
for (iter = 0; iter < p.prog_cnt; iter++) {
__u32 attach_flags;
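		/* Prefer the per-program flags reported by newer kernels
		 * and fall back to the query-wide legacy flags.
		 */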
attach_flags = prog_attach_flags[iter] ?: p.attach_flags;
switch (attach_flags) {
case BPF_F_ALLOW_MULTI:
attach_flags_str = "multi";
break;
case BPF_F_ALLOW_OVERRIDE:
attach_flags_str = "override";
break;
case 0:
attach_flags_str = "";
break;
default:
snprintf(buf, sizeof(buf), "unknown(%x)", attach_flags);
attach_flags_str = buf;
}
show_bpf_prog(prog_ids[iter], type,
attach_flags_str, level);
}
return 0;
}
static int show_bpf_progs(int cgroup_fd, enum bpf_attach_type type,
int level)
{
return query_flags & BPF_F_QUERY_EFFECTIVE ?
show_effective_bpf_progs(cgroup_fd, type, level) :
show_attached_bpf_progs(cgroup_fd, type, level);
}
static int do_show(int argc, char **argv)
{
enum bpf_attach_type type;
int has_attached_progs;
const char *path;
int cgroup_fd;
int ret = -1;
query_flags = 0;
if (!REQ_ARGS(1))
return -1;
path = GET_ARG();
while (argc) {
if (is_prefix(*argv, "effective")) {
if (query_flags & BPF_F_QUERY_EFFECTIVE) {
p_err("duplicated argument: %s", *argv);
return -1;
}
query_flags |= BPF_F_QUERY_EFFECTIVE;
NEXT_ARG();
} else {
p_err("expected no more arguments, 'effective', got: '%s'?",
*argv);
return -1;
}
}
cgroup_fd = open(path, O_RDONLY);
if (cgroup_fd < 0) {
p_err("can't open cgroup %s", path);
goto exit;
}
has_attached_progs = cgroup_has_attached_progs(cgroup_fd);
if (has_attached_progs < 0) {
p_err("can't query bpf programs attached to %s: %s",
path, strerror(errno));
goto exit_cgroup;
} else if (!has_attached_progs) {
ret = 0;
goto exit_cgroup;
}
if (json_output)
jsonw_start_array(json_wtr);
else if (query_flags & BPF_F_QUERY_EFFECTIVE)
printf("%-8s %-15s %-15s\n", "ID", "AttachType", "Name");
else
printf("%-8s %-15s %-15s %-15s\n", "ID", "AttachType",
"AttachFlags", "Name");
btf_vmlinux = libbpf_find_kernel_btf();
for (type = 0; type < __MAX_BPF_ATTACH_TYPE; type++) {
/*
* Not all attach types may be supported, so it's expected,
* that some requests will fail.
* If we were able to get the show for at least one
* attach type, let's return 0.
*/
if (show_bpf_progs(cgroup_fd, type, 0) == 0)
ret = 0;
}
if (json_output)
jsonw_end_array(json_wtr);
exit_cgroup:
close(cgroup_fd);
exit:
return ret;
}
/*
* To distinguish nftw() errors and do_show_tree_fn() errors
* and avoid duplicating error messages, let's return -2
* from do_show_tree_fn() in case of error.
*/
#define NFTW_ERR -1
#define SHOW_TREE_FN_ERR -2
static int do_show_tree_fn(const char *fpath, const struct stat *sb,
int typeflag, struct FTW *ftw)
{
enum bpf_attach_type type;
int has_attached_progs;
int cgroup_fd;
if (typeflag != FTW_D)
return 0;
cgroup_fd = open(fpath, O_RDONLY);
if (cgroup_fd < 0) {
p_err("can't open cgroup %s: %s", fpath, strerror(errno));
return SHOW_TREE_FN_ERR;
}
has_attached_progs = cgroup_has_attached_progs(cgroup_fd);
if (has_attached_progs < 0) {
p_err("can't query bpf programs attached to %s: %s",
fpath, strerror(errno));
close(cgroup_fd);
return SHOW_TREE_FN_ERR;
} else if (!has_attached_progs) {
close(cgroup_fd);
return 0;
}
if (json_output) {
jsonw_start_object(json_wtr);
jsonw_string_field(json_wtr, "cgroup", fpath);
jsonw_name(json_wtr, "programs");
jsonw_start_array(json_wtr);
} else {
printf("%s\n", fpath);
}
btf_vmlinux = libbpf_find_kernel_btf();
for (type = 0; type < __MAX_BPF_ATTACH_TYPE; type++)
show_bpf_progs(cgroup_fd, type, ftw->level);
if (errno == EINVAL)
/* Last attach type does not support query.
* Do not report an error for this, especially because batch
* mode would stop processing commands.
*/
errno = 0;
if (json_output) {
jsonw_end_array(json_wtr);
jsonw_end_object(json_wtr);
}
close(cgroup_fd);
return 0;
}
static char *find_cgroup_root(void)
{
struct mntent *mnt;
FILE *f;
f = fopen("/proc/mounts", "r");
if (f == NULL)
return NULL;
while ((mnt = getmntent(f))) {
if (strcmp(mnt->mnt_type, "cgroup2") == 0) {
fclose(f);
return strdup(mnt->mnt_dir);
}
}
fclose(f);
return NULL;
}
static int do_show_tree(int argc, char **argv)
{
char *cgroup_root, *cgroup_alloced = NULL;
int ret;
query_flags = 0;
if (!argc) {
cgroup_alloced = find_cgroup_root();
if (!cgroup_alloced) {
p_err("cgroup v2 isn't mounted");
return -1;
}
cgroup_root = cgroup_alloced;
} else {
cgroup_root = GET_ARG();
while (argc) {
if (is_prefix(*argv, "effective")) {
if (query_flags & BPF_F_QUERY_EFFECTIVE) {
p_err("duplicated argument: %s", *argv);
return -1;
}
query_flags |= BPF_F_QUERY_EFFECTIVE;
NEXT_ARG();
} else {
p_err("expected no more arguments, 'effective', got: '%s'?",
*argv);
return -1;
}
}
}
if (json_output)
jsonw_start_array(json_wtr);
else if (query_flags & BPF_F_QUERY_EFFECTIVE)
printf("%s\n"
"%-8s %-15s %-15s\n",
"CgroupPath",
"ID", "AttachType", "Name");
else
printf("%s\n"
"%-8s %-15s %-15s %-15s\n",
"CgroupPath",
"ID", "AttachType", "AttachFlags", "Name");
switch (nftw(cgroup_root, do_show_tree_fn, 1024, FTW_MOUNT)) {
case NFTW_ERR:
p_err("can't iterate over %s: %s", cgroup_root,
strerror(errno));
ret = -1;
break;
case SHOW_TREE_FN_ERR:
ret = -1;
break;
default:
ret = 0;
}
if (json_output)
jsonw_end_array(json_wtr);
free(cgroup_alloced);
return ret;
}
static int do_attach(int argc, char **argv)
{
enum bpf_attach_type attach_type;
int cgroup_fd, prog_fd;
int attach_flags = 0;
int ret = -1;
int i;
if (argc < 4) {
p_err("too few parameters for cgroup attach");
goto exit;
}
cgroup_fd = open(argv[0], O_RDONLY);
if (cgroup_fd < 0) {
p_err("can't open cgroup %s", argv[0]);
goto exit;
}
attach_type = parse_attach_type(argv[1]);
if (attach_type == __MAX_BPF_ATTACH_TYPE) {
p_err("invalid attach type");
goto exit_cgroup;
}
argc -= 2;
argv = &argv[2];
prog_fd = prog_parse_fd(&argc, &argv);
if (prog_fd < 0)
goto exit_cgroup;
for (i = 0; i < argc; i++) {
if (is_prefix(argv[i], "multi")) {
attach_flags |= BPF_F_ALLOW_MULTI;
} else if (is_prefix(argv[i], "override")) {
attach_flags |= BPF_F_ALLOW_OVERRIDE;
} else {
p_err("unknown option: %s", argv[i]);
goto exit_cgroup;
}
}
if (bpf_prog_attach(prog_fd, cgroup_fd, attach_type, attach_flags)) {
p_err("failed to attach program");
goto exit_prog;
}
if (json_output)
jsonw_null(json_wtr);
ret = 0;
exit_prog:
close(prog_fd);
exit_cgroup:
close(cgroup_fd);
exit:
return ret;
}
static int do_detach(int argc, char **argv)
{
enum bpf_attach_type attach_type;
int prog_fd, cgroup_fd;
int ret = -1;
if (argc < 4) {
p_err("too few parameters for cgroup detach");
goto exit;
}
cgroup_fd = open(argv[0], O_RDONLY);
if (cgroup_fd < 0) {
p_err("can't open cgroup %s", argv[0]);
goto exit;
}
attach_type = parse_attach_type(argv[1]);
if (attach_type == __MAX_BPF_ATTACH_TYPE) {
p_err("invalid attach type");
goto exit_cgroup;
}
argc -= 2;
argv = &argv[2];
prog_fd = prog_parse_fd(&argc, &argv);
if (prog_fd < 0)
goto exit_cgroup;
if (bpf_prog_detach2(prog_fd, cgroup_fd, attach_type)) {
p_err("failed to detach program");
goto exit_prog;
}
if (json_output)
jsonw_null(json_wtr);
ret = 0;
exit_prog:
close(prog_fd);
exit_cgroup:
close(cgroup_fd);
exit:
return ret;
}
static int do_help(int argc, char **argv)
{
if (json_output) {
jsonw_null(json_wtr);
return 0;
}
fprintf(stderr,
"Usage: %1$s %2$s { show | list } CGROUP [**effective**]\n"
" %1$s %2$s tree [CGROUP_ROOT] [**effective**]\n"
" %1$s %2$s attach CGROUP ATTACH_TYPE PROG [ATTACH_FLAGS]\n"
" %1$s %2$s detach CGROUP ATTACH_TYPE PROG\n"
" %1$s %2$s help\n"
"\n"
HELP_SPEC_ATTACH_TYPES "\n"
" " HELP_SPEC_ATTACH_FLAGS "\n"
" " HELP_SPEC_PROGRAM "\n"
" " HELP_SPEC_OPTIONS " |\n"
" {-f|--bpffs} }\n"
"",
bin_name, argv[-2]);
return 0;
}
static const struct cmd cmds[] = {
{ "show", do_show },
{ "list", do_show },
{ "tree", do_show_tree },
{ "attach", do_attach },
{ "detach", do_detach },
{ "help", do_help },
{ 0 }
};
int do_cgroup(int argc, char **argv)
{
return cmd_select(cmds, argc, argv, do_help);
}
| linux-master | tools/bpf/bpftool/cgroup.c |
// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
// Copyright (c) 2020 Facebook
#include <vmlinux.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>
struct bpf_perf_event_value___local {
__u64 counter;
__u64 enabled;
__u64 running;
} __attribute__((preserve_access_index));
/* map of perf event fds, num_cpu * num_metric entries */
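/* The fd for metric m on cpu c is stored at index c + m * num_cpu. */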
struct {
__uint(type, BPF_MAP_TYPE_PERF_EVENT_ARRAY);
__uint(key_size, sizeof(u32));
__uint(value_size, sizeof(int));
} events SEC(".maps");
/* readings at fentry */
struct {
__uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
__uint(key_size, sizeof(u32));
__uint(value_size, sizeof(struct bpf_perf_event_value___local));
} fentry_readings SEC(".maps");
/* accumulated readings */
struct {
__uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
__uint(key_size, sizeof(u32));
__uint(value_size, sizeof(struct bpf_perf_event_value___local));
} accum_readings SEC(".maps");
/* sample counts, one per cpu */
struct {
__uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
__uint(key_size, sizeof(u32));
__uint(value_size, sizeof(u64));
} counts SEC(".maps");
const volatile __u32 num_cpu = 1;
const volatile __u32 num_metric = 1;
#define MAX_NUM_MATRICS 4
SEC("fentry/XXX")
int BPF_PROG(fentry_XXX)
{
struct bpf_perf_event_value___local *ptrs[MAX_NUM_MATRICS];
u32 key = bpf_get_smp_processor_id();
u32 i;
/* look up before reading, to reduce error */
for (i = 0; i < num_metric && i < MAX_NUM_MATRICS; i++) {
u32 flag = i;
ptrs[i] = bpf_map_lookup_elem(&fentry_readings, &flag);
if (!ptrs[i])
return 0;
}
for (i = 0; i < num_metric && i < MAX_NUM_MATRICS; i++) {
struct bpf_perf_event_value___local reading;
int err;
err = bpf_perf_event_read_value(&events, key, (void *)&reading,
sizeof(reading));
if (err)
return 0;
*(ptrs[i]) = reading;
key += num_cpu;
}
return 0;
}
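/* Fold the delta between the fentry snapshot and the fexit reading of
 * metric @id into the accumulated per-cpu totals.
 */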
static inline void
fexit_update_maps(u32 id, struct bpf_perf_event_value___local *after)
{
struct bpf_perf_event_value___local *before, diff;
before = bpf_map_lookup_elem(&fentry_readings, &id);
/* only account samples with a valid fentry_reading */
if (before && before->counter) {
struct bpf_perf_event_value___local *accum;
diff.counter = after->counter - before->counter;
diff.enabled = after->enabled - before->enabled;
diff.running = after->running - before->running;
accum = bpf_map_lookup_elem(&accum_readings, &id);
if (accum) {
accum->counter += diff.counter;
accum->enabled += diff.enabled;
accum->running += diff.running;
}
}
}
SEC("fexit/XXX")
int BPF_PROG(fexit_XXX)
{
struct bpf_perf_event_value___local readings[MAX_NUM_MATRICS];
u32 cpu = bpf_get_smp_processor_id();
u32 i, zero = 0;
int err;
u64 *count;
/* read all events before updating the maps, to reduce error */
for (i = 0; i < num_metric && i < MAX_NUM_MATRICS; i++) {
err = bpf_perf_event_read_value(&events, cpu + i * num_cpu,
(void *)(readings + i),
sizeof(*readings));
if (err)
return 0;
}
count = bpf_map_lookup_elem(&counts, &zero);
if (count) {
*count += 1;
for (i = 0; i < num_metric && i < MAX_NUM_MATRICS; i++)
fexit_update_maps(i, &readings[i]);
}
return 0;
}
char LICENSE[] SEC("license") = "Dual BSD/GPL";
| linux-master | tools/bpf/bpftool/skeleton/profiler.bpf.c |
// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
/* Copyright (c) 2020 Facebook */
#include <vmlinux.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_core_read.h>
#include <bpf/bpf_tracing.h>
#include "pid_iter.h"
/* keep in sync with the definition in main.h */
enum bpf_obj_type {
BPF_OBJ_UNKNOWN,
BPF_OBJ_PROG,
BPF_OBJ_MAP,
BPF_OBJ_LINK,
BPF_OBJ_BTF,
};
struct bpf_perf_link___local {
struct bpf_link link;
struct file *perf_file;
} __attribute__((preserve_access_index));
struct perf_event___local {
u64 bpf_cookie;
} __attribute__((preserve_access_index));
enum bpf_link_type___local {
BPF_LINK_TYPE_PERF_EVENT___local = 7,
};
extern const void bpf_link_fops __ksym;
extern const void bpf_map_fops __ksym;
extern const void bpf_prog_fops __ksym;
extern const void btf_fops __ksym;
const volatile enum bpf_obj_type obj_type = BPF_OBJ_UNKNOWN;
static __always_inline __u32 get_obj_id(void *ent, enum bpf_obj_type type)
{
switch (type) {
case BPF_OBJ_PROG:
return BPF_CORE_READ((struct bpf_prog *)ent, aux, id);
case BPF_OBJ_MAP:
return BPF_CORE_READ((struct bpf_map *)ent, id);
case BPF_OBJ_BTF:
return BPF_CORE_READ((struct btf *)ent, id);
case BPF_OBJ_LINK:
return BPF_CORE_READ((struct bpf_link *)ent, id);
default:
return 0;
}
}
/* could be used only with BPF_LINK_TYPE_PERF_EVENT links */
static __u64 get_bpf_cookie(struct bpf_link *link)
{
struct bpf_perf_link___local *perf_link;
struct perf_event___local *event;
perf_link = container_of(link, struct bpf_perf_link___local, link);
event = BPF_CORE_READ(perf_link, perf_file, private_data);
return BPF_CORE_READ(event, bpf_cookie);
}
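/* Walk every (task, file) pair on the system and emit one entry for
 * each fd that refers to a BPF object of the requested type; BPF
 * objects are recognized by comparing file->f_op against the kernel's
 * fops symbols resolved above.
 */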
SEC("iter/task_file")
int iter(struct bpf_iter__task_file *ctx)
{
struct file *file = ctx->file;
struct task_struct *task = ctx->task;
struct pid_iter_entry e;
const void *fops;
if (!file || !task)
return 0;
switch (obj_type) {
case BPF_OBJ_PROG:
fops = &bpf_prog_fops;
break;
case BPF_OBJ_MAP:
fops = &bpf_map_fops;
break;
case BPF_OBJ_BTF:
fops = &btf_fops;
break;
case BPF_OBJ_LINK:
fops = &bpf_link_fops;
break;
default:
return 0;
}
if (file->f_op != fops)
return 0;
__builtin_memset(&e, 0, sizeof(e));
e.pid = task->tgid;
e.id = get_obj_id(file->private_data, obj_type);
if (obj_type == BPF_OBJ_LINK &&
bpf_core_enum_value_exists(enum bpf_link_type___local,
BPF_LINK_TYPE_PERF_EVENT___local)) {
struct bpf_link *link = (struct bpf_link *) file->private_data;
if (link->type == bpf_core_enum_value(enum bpf_link_type___local,
BPF_LINK_TYPE_PERF_EVENT___local)) {
e.has_bpf_cookie = true;
e.bpf_cookie = get_bpf_cookie(link);
}
}
bpf_probe_read_kernel_str(&e.comm, sizeof(e.comm),
task->group_leader->comm);
bpf_seq_write(ctx->meta->seq, &e, sizeof(e));
return 0;
}
char LICENSE[] SEC("license") = "Dual BSD/GPL";
| linux-master | tools/bpf/bpftool/skeleton/pid_iter.bpf.c |
// SPDX-License-Identifier: GPL-2.0-only
/* IIO - useful set of util functionality
*
* Copyright (c) 2008 Jonathan Cameron
*/
#include <string.h>
#include <stdlib.h>
#include <stdio.h>
#include <stdint.h>
#include <dirent.h>
#include <errno.h>
#include <ctype.h>
#include "iio_utils.h"
const char *iio_dir = "/sys/bus/iio/devices/";
static char * const iio_direction[] = {
"in",
"out",
};
/**
* iioutils_break_up_name() - extract generic name from full channel name
* @full_name: the full channel name
* @generic_name: the output generic channel name
*
* Returns 0 on success, or a negative error code if string extraction failed.
**/
int iioutils_break_up_name(const char *full_name, char **generic_name)
{
char *current;
char *w, *r;
char *working, *prefix = "";
int i, ret;
for (i = 0; i < ARRAY_SIZE(iio_direction); i++)
if (!strncmp(full_name, iio_direction[i],
strlen(iio_direction[i]))) {
prefix = iio_direction[i];
break;
}
current = strdup(full_name + strlen(prefix) + 1);
if (!current)
return -ENOMEM;
	working = strtok(current, "_");
if (!working) {
free(current);
return -EINVAL;
}
w = working;
r = working;
while (*r != '\0') {
if (!isdigit(*r)) {
*w = *r;
w++;
}
r++;
}
*w = '\0';
ret = asprintf(generic_name, "%s_%s", prefix, working);
free(current);
return (ret == -1) ? -ENOMEM : 0;
}
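/*
 * Example (illustrative, not part of the original tool): "in_voltage1"
 * yields the generic name "in_voltage" and "in_accel_x" yields
 * "in_accel". The caller owns the returned string:
 *
 *	char *generic;
 *
 *	if (!iioutils_break_up_name("in_accel_x", &generic)) {
 *		printf("%s\n", generic);
 *		free(generic);
 *	}
 */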
/**
* iioutils_get_type() - find and process _type attribute data
* @is_signed: output whether channel is signed
* @bytes: output how many bytes the channel storage occupies
* @bits_used: output number of valid bits of data
* @shift: output amount of bits to shift right data before applying bit mask
* @mask: output a bit mask for the raw data
* @be: output if data in big endian
* @device_dir: the IIO device directory
* @buffer_idx: the IIO buffer index
* @name: the channel name
* @generic_name: the channel type name
 *
 * Parses the scan element _type attribute, which has the form
 * [be|le]:[s|u]bits/storagebits[>>shift]; e.g. "le:s12/16>>4" describes
 * a little-endian, signed channel with 12 valid bits stored in 16 bits,
 * right-shifted by 4.
 *
 * Returns a value >= 0 on success, otherwise a negative error code.
**/
static int iioutils_get_type(unsigned int *is_signed, unsigned int *bytes,
unsigned int *bits_used, unsigned int *shift,
uint64_t *mask, unsigned int *be,
const char *device_dir, int buffer_idx,
const char *name, const char *generic_name)
{
FILE *sysfsfp;
int ret;
DIR *dp;
char *scan_el_dir, *builtname, *builtname_generic, *filename = 0;
char signchar, endianchar;
unsigned padint;
const struct dirent *ent;
ret = asprintf(&scan_el_dir, FORMAT_SCAN_ELEMENTS_DIR, device_dir, buffer_idx);
if (ret < 0)
return -ENOMEM;
ret = asprintf(&builtname, FORMAT_TYPE_FILE, name);
if (ret < 0) {
ret = -ENOMEM;
goto error_free_scan_el_dir;
}
ret = asprintf(&builtname_generic, FORMAT_TYPE_FILE, generic_name);
if (ret < 0) {
ret = -ENOMEM;
goto error_free_builtname;
}
dp = opendir(scan_el_dir);
if (!dp) {
ret = -errno;
goto error_free_builtname_generic;
}
ret = -ENOENT;
while (ent = readdir(dp), ent)
if ((strcmp(builtname, ent->d_name) == 0) ||
(strcmp(builtname_generic, ent->d_name) == 0)) {
ret = asprintf(&filename,
"%s/%s", scan_el_dir, ent->d_name);
if (ret < 0) {
ret = -ENOMEM;
goto error_closedir;
}
sysfsfp = fopen(filename, "r");
if (!sysfsfp) {
ret = -errno;
fprintf(stderr, "failed to open %s\n",
filename);
goto error_free_filename;
}
ret = fscanf(sysfsfp,
"%ce:%c%u/%u>>%u",
&endianchar,
&signchar,
bits_used,
&padint, shift);
if (ret < 0) {
ret = -errno;
fprintf(stderr,
"failed to pass scan type description\n");
goto error_close_sysfsfp;
} else if (ret != 5) {
ret = -EIO;
fprintf(stderr,
"scan type description didn't match\n");
goto error_close_sysfsfp;
}
*be = (endianchar == 'b');
*bytes = padint / 8;
if (*bits_used == 64)
*mask = ~(0ULL);
else
*mask = (1ULL << *bits_used) - 1ULL;
*is_signed = (signchar == 's');
if (fclose(sysfsfp)) {
ret = -errno;
fprintf(stderr, "Failed to close %s\n",
filename);
goto error_free_filename;
}
sysfsfp = 0;
free(filename);
filename = 0;
/*
* Avoid having a more generic entry overwriting
* the settings.
*/
if (strcmp(builtname, ent->d_name) == 0)
break;
}
error_close_sysfsfp:
if (sysfsfp)
if (fclose(sysfsfp))
perror("iioutils_get_type(): Failed to close file");
error_free_filename:
if (filename)
free(filename);
error_closedir:
if (closedir(dp) == -1)
perror("iioutils_get_type(): Failed to close directory");
error_free_builtname_generic:
free(builtname_generic);
error_free_builtname:
free(builtname);
error_free_scan_el_dir:
free(scan_el_dir);
return ret;
}
/**
* iioutils_get_param_float() - read a float value from a channel parameter
* @output: output the float value
* @param_name: the parameter name to read
* @device_dir: the IIO device directory in sysfs
* @name: the channel name
* @generic_name: the channel type name
*
* Returns a value >= 0 on success, otherwise a negative error code.
**/
int iioutils_get_param_float(float *output, const char *param_name,
const char *device_dir, const char *name,
const char *generic_name)
{
FILE *sysfsfp;
int ret;
DIR *dp;
char *builtname, *builtname_generic;
char *filename = NULL;
const struct dirent *ent;
ret = asprintf(&builtname, "%s_%s", name, param_name);
if (ret < 0)
return -ENOMEM;
ret = asprintf(&builtname_generic,
"%s_%s", generic_name, param_name);
if (ret < 0) {
ret = -ENOMEM;
goto error_free_builtname;
}
dp = opendir(device_dir);
if (!dp) {
ret = -errno;
goto error_free_builtname_generic;
}
ret = -ENOENT;
while (ent = readdir(dp), ent)
if ((strcmp(builtname, ent->d_name) == 0) ||
(strcmp(builtname_generic, ent->d_name) == 0)) {
ret = asprintf(&filename,
"%s/%s", device_dir, ent->d_name);
if (ret < 0) {
ret = -ENOMEM;
goto error_closedir;
}
sysfsfp = fopen(filename, "r");
if (!sysfsfp) {
ret = -errno;
goto error_free_filename;
}
errno = 0;
if (fscanf(sysfsfp, "%f", output) != 1)
ret = errno ? -errno : -ENODATA;
fclose(sysfsfp);
break;
}
error_free_filename:
if (filename)
free(filename);
error_closedir:
if (closedir(dp) == -1)
perror("iioutils_get_param_float(): Failed to close directory");
error_free_builtname_generic:
free(builtname_generic);
error_free_builtname:
free(builtname);
return ret;
}
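/*
 * Example (illustrative, not part of the original tool): read the scale
 * that applies to channel "in_voltage0", with device_dir pointing at a
 * sysfs device directory:
 *
 *	float scale;
 *	int ret;
 *
 *	ret = iioutils_get_param_float(&scale, "scale", device_dir,
 *				       "in_voltage0", "in_voltage");
 */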
/**
* bsort_channel_array_by_index() - sort the array in index order
* @ci_array: the iio_channel_info array to be sorted
* @cnt: the amount of array elements
**/
void bsort_channel_array_by_index(struct iio_channel_info *ci_array, int cnt)
{
struct iio_channel_info temp;
int x, y;
for (x = 0; x < cnt; x++)
for (y = 0; y < (cnt - 1); y++)
if (ci_array[y].index > ci_array[y + 1].index) {
temp = ci_array[y + 1];
ci_array[y + 1] = ci_array[y];
ci_array[y] = temp;
}
}
/**
* build_channel_array() - function to figure out what channels are present
* @device_dir: the IIO device directory in sysfs
* @buffer_idx: the IIO buffer for this channel array
* @ci_array: output the resulting array of iio_channel_info
* @counter: output the amount of array elements
*
* Returns 0 on success, otherwise a negative error code.
**/
int build_channel_array(const char *device_dir, int buffer_idx,
struct iio_channel_info **ci_array, int *counter)
{
DIR *dp;
FILE *sysfsfp;
int count = 0, i;
struct iio_channel_info *current;
int ret;
const struct dirent *ent;
char *scan_el_dir;
char *filename;
*counter = 0;
ret = asprintf(&scan_el_dir, FORMAT_SCAN_ELEMENTS_DIR, device_dir, buffer_idx);
if (ret < 0)
return -ENOMEM;
dp = opendir(scan_el_dir);
if (!dp) {
ret = -errno;
goto error_free_name;
}
while (ent = readdir(dp), ent)
if (strcmp(ent->d_name + strlen(ent->d_name) - strlen("_en"),
"_en") == 0) {
ret = asprintf(&filename,
"%s/%s", scan_el_dir, ent->d_name);
if (ret < 0) {
ret = -ENOMEM;
goto error_close_dir;
}
sysfsfp = fopen(filename, "r");
free(filename);
if (!sysfsfp) {
ret = -errno;
goto error_close_dir;
}
errno = 0;
if (fscanf(sysfsfp, "%i", &ret) != 1) {
ret = errno ? -errno : -ENODATA;
if (fclose(sysfsfp))
perror("build_channel_array(): Failed to close file");
goto error_close_dir;
}
if (ret == 1)
(*counter)++;
if (fclose(sysfsfp)) {
ret = -errno;
goto error_close_dir;
}
}
*ci_array = malloc(sizeof(**ci_array) * (*counter));
if (!*ci_array) {
ret = -ENOMEM;
goto error_close_dir;
}
seekdir(dp, 0);
while (ent = readdir(dp), ent) {
if (strcmp(ent->d_name + strlen(ent->d_name) - strlen("_en"),
"_en") == 0) {
int current_enabled = 0;
current = &(*ci_array)[count++];
ret = asprintf(&filename,
"%s/%s", scan_el_dir, ent->d_name);
if (ret < 0) {
ret = -ENOMEM;
/* decrement count to avoid freeing name */
count--;
goto error_cleanup_array;
}
sysfsfp = fopen(filename, "r");
free(filename);
if (!sysfsfp) {
ret = -errno;
count--;
goto error_cleanup_array;
}
errno = 0;
if (fscanf(sysfsfp, "%i", ¤t_enabled) != 1) {
ret = errno ? -errno : -ENODATA;
count--;
goto error_cleanup_array;
}
if (fclose(sysfsfp)) {
ret = -errno;
count--;
goto error_cleanup_array;
}
if (!current_enabled) {
count--;
continue;
}
current->scale = 1.0;
current->offset = 0;
current->name = strndup(ent->d_name,
strlen(ent->d_name) -
strlen("_en"));
if (!current->name) {
ret = -ENOMEM;
count--;
goto error_cleanup_array;
}
/* Get the generic and specific name elements */
ret = iioutils_break_up_name(current->name,
						     &current->generic_name);
if (ret) {
free(current->name);
count--;
goto error_cleanup_array;
}
ret = asprintf(&filename,
"%s/%s_index",
scan_el_dir,
current->name);
if (ret < 0) {
ret = -ENOMEM;
goto error_cleanup_array;
}
sysfsfp = fopen(filename, "r");
free(filename);
if (!sysfsfp) {
ret = -errno;
fprintf(stderr, "failed to open %s/%s_index\n",
scan_el_dir, current->name);
goto error_cleanup_array;
}
errno = 0;
if (fscanf(sysfsfp, "%u", ¤t->index) != 1) {
ret = errno ? -errno : -ENODATA;
if (fclose(sysfsfp))
perror("build_channel_array(): Failed to close file");
goto error_cleanup_array;
}
if (fclose(sysfsfp)) {
ret = -errno;
goto error_cleanup_array;
}
/* Find the scale */
			ret = iioutils_get_param_float(&current->scale,
"scale",
device_dir,
current->name,
current->generic_name);
if ((ret < 0) && (ret != -ENOENT))
goto error_cleanup_array;
			ret = iioutils_get_param_float(&current->offset,
"offset",
device_dir,
current->name,
current->generic_name);
if ((ret < 0) && (ret != -ENOENT))
goto error_cleanup_array;
			ret = iioutils_get_type(&current->is_signed,
						&current->bytes,
						&current->bits_used,
						&current->shift,
						&current->mask,
						&current->be,
device_dir,
buffer_idx,
current->name,
current->generic_name);
if (ret < 0)
goto error_cleanup_array;
}
}
if (closedir(dp) == -1) {
ret = -errno;
goto error_cleanup_array;
}
free(scan_el_dir);
/* reorder so that the array is in index order */
bsort_channel_array_by_index(*ci_array, *counter);
return 0;
error_cleanup_array:
for (i = count - 1; i >= 0; i--) {
free((*ci_array)[i].name);
free((*ci_array)[i].generic_name);
}
free(*ci_array);
*ci_array = NULL;
*counter = 0;
error_close_dir:
if (dp)
if (closedir(dp) == -1)
perror("build_channel_array(): Failed to close dir");
error_free_name:
free(scan_el_dir);
return ret;
}
static int calc_digits(int num)
{
int count = 0;
/* It takes a digit to represent zero */
if (!num)
return 1;
while (num != 0) {
num /= 10;
count++;
}
return count;
}
/**
* find_type_by_name() - function to match top level types by name
* @name: top level type instance name
* @type: the type of top level instance being searched
*
* Returns the device number of a matched IIO device on success, otherwise a
* negative error code.
* Typical types this is used for are device and trigger.
**/
int find_type_by_name(const char *name, const char *type)
{
const struct dirent *ent;
int number, numstrlen, ret;
FILE *namefp;
DIR *dp;
char thisname[IIO_MAX_NAME_LENGTH];
char *filename;
dp = opendir(iio_dir);
if (!dp) {
fprintf(stderr, "No industrialio devices available\n");
return -ENODEV;
}
while (ent = readdir(dp), ent) {
if (strcmp(ent->d_name, ".") != 0 &&
strcmp(ent->d_name, "..") != 0 &&
strlen(ent->d_name) > strlen(type) &&
strncmp(ent->d_name, type, strlen(type)) == 0) {
errno = 0;
ret = sscanf(ent->d_name + strlen(type), "%d", &number);
if (ret < 0) {
ret = -errno;
fprintf(stderr,
"failed to read element number\n");
goto error_close_dir;
} else if (ret != 1) {
ret = -EIO;
fprintf(stderr,
"failed to match element number\n");
goto error_close_dir;
}
numstrlen = calc_digits(number);
/* verify the next character is not a colon */
if (strncmp(ent->d_name + strlen(type) + numstrlen,
":", 1) != 0) {
filename = malloc(strlen(iio_dir) + strlen(type)
+ numstrlen + 6);
if (!filename) {
ret = -ENOMEM;
goto error_close_dir;
}
ret = sprintf(filename, "%s%s%d/name", iio_dir,
type, number);
if (ret < 0) {
free(filename);
goto error_close_dir;
}
namefp = fopen(filename, "r");
if (!namefp) {
free(filename);
continue;
}
free(filename);
errno = 0;
if (fscanf(namefp, "%s", thisname) != 1) {
ret = errno ? -errno : -ENODATA;
goto error_close_dir;
}
if (fclose(namefp)) {
ret = -errno;
goto error_close_dir;
}
if (strcmp(name, thisname) == 0) {
if (closedir(dp) == -1)
return -errno;
return number;
}
}
}
}
if (closedir(dp) == -1)
return -errno;
return -ENODEV;
error_close_dir:
if (closedir(dp) == -1)
perror("find_type_by_name(): Failed to close directory");
return ret;
}
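/*
 * Example (illustrative; the device name "lsm6dsl" is hypothetical):
 * resolve a named sensor to its sysfs directory:
 *
 *	int dev_num = find_type_by_name("lsm6dsl", "iio:device");
 *
 *	if (dev_num >= 0)
 *		printf("%siio:device%d\n", iio_dir, dev_num);
 */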
static int _write_sysfs_int(const char *filename, const char *basedir, int val,
int verify)
{
int ret = 0;
FILE *sysfsfp;
int test;
char *temp = malloc(strlen(basedir) + strlen(filename) + 2);
if (!temp)
return -ENOMEM;
ret = sprintf(temp, "%s/%s", basedir, filename);
if (ret < 0)
goto error_free;
sysfsfp = fopen(temp, "w");
if (!sysfsfp) {
ret = -errno;
fprintf(stderr, "failed to open %s\n", temp);
goto error_free;
}
ret = fprintf(sysfsfp, "%d", val);
if (ret < 0) {
if (fclose(sysfsfp))
perror("_write_sysfs_int(): Failed to close dir");
goto error_free;
}
if (fclose(sysfsfp)) {
ret = -errno;
goto error_free;
}
if (verify) {
sysfsfp = fopen(temp, "r");
if (!sysfsfp) {
ret = -errno;
fprintf(stderr, "failed to open %s\n", temp);
goto error_free;
}
if (fscanf(sysfsfp, "%d", &test) != 1) {
ret = errno ? -errno : -ENODATA;
if (fclose(sysfsfp))
perror("_write_sysfs_int(): Failed to close dir");
goto error_free;
}
if (fclose(sysfsfp)) {
ret = -errno;
goto error_free;
}
if (test != val) {
fprintf(stderr,
"Possible failure in int write %d to %s/%s\n",
val, basedir, filename);
ret = -1;
}
}
error_free:
free(temp);
return ret;
}
/**
* write_sysfs_int() - write an integer value to a sysfs file
* @filename: name of the file to write to
* @basedir: the sysfs directory in which the file is to be found
* @val: integer value to write to file
*
* Returns a value >= 0 on success, otherwise a negative error code.
**/
int write_sysfs_int(const char *filename, const char *basedir, int val)
{
return _write_sysfs_int(filename, basedir, val, 0);
}
/**
* write_sysfs_int_and_verify() - write an integer value to a sysfs file
* and verify
* @filename: name of the file to write to
* @basedir: the sysfs directory in which the file is to be found
* @val: integer value to write to file
*
* Returns a value >= 0 on success, otherwise a negative error code.
**/
int write_sysfs_int_and_verify(const char *filename, const char *basedir,
int val)
{
return _write_sysfs_int(filename, basedir, val, 1);
}
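/*
 * Example (illustrative, not part of the original tool): enable an IIO
 * buffer and confirm that the value stuck, with buf_dir_name pointing
 * at e.g. ".../iio:device0/buffer0":
 *
 *	if (write_sysfs_int_and_verify("enable", buf_dir_name, 1) < 0)
 *		fprintf(stderr, "failed to enable buffer\n");
 */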
static int _write_sysfs_string(const char *filename, const char *basedir,
const char *val, int verify)
{
int ret = 0;
FILE *sysfsfp;
char *temp = malloc(strlen(basedir) + strlen(filename) + 2);
if (!temp) {
fprintf(stderr, "Memory allocation failed\n");
return -ENOMEM;
}
ret = sprintf(temp, "%s/%s", basedir, filename);
if (ret < 0)
goto error_free;
sysfsfp = fopen(temp, "w");
if (!sysfsfp) {
ret = -errno;
fprintf(stderr, "Could not open %s\n", temp);
goto error_free;
}
ret = fprintf(sysfsfp, "%s", val);
if (ret < 0) {
if (fclose(sysfsfp))
perror("_write_sysfs_string(): Failed to close dir");
goto error_free;
}
if (fclose(sysfsfp)) {
ret = -errno;
goto error_free;
}
if (verify) {
sysfsfp = fopen(temp, "r");
if (!sysfsfp) {
ret = -errno;
fprintf(stderr, "Could not open file to verify\n");
goto error_free;
}
if (fscanf(sysfsfp, "%s", temp) != 1) {
ret = errno ? -errno : -ENODATA;
if (fclose(sysfsfp))
perror("_write_sysfs_string(): Failed to close dir");
goto error_free;
}
if (fclose(sysfsfp)) {
ret = -errno;
goto error_free;
}
if (strcmp(temp, val) != 0) {
fprintf(stderr,
"Possible failure in string write of %s "
"Should be %s written to %s/%s\n", temp, val,
basedir, filename);
ret = -1;
}
}
error_free:
free(temp);
return ret;
}
/**
* write_sysfs_string_and_verify() - string write, readback and verify
* @filename: name of file to write to
* @basedir: the sysfs directory in which the file is to be found
* @val: the string to write
*
* Returns a value >= 0 on success, otherwise a negative error code.
**/
int write_sysfs_string_and_verify(const char *filename, const char *basedir,
const char *val)
{
return _write_sysfs_string(filename, basedir, val, 1);
}
/**
* write_sysfs_string() - write string to a sysfs file
* @filename: name of file to write to
* @basedir: the sysfs directory in which the file is to be found
* @val: the string to write
*
* Returns a value >= 0 on success, otherwise a negative error code.
**/
int write_sysfs_string(const char *filename, const char *basedir,
const char *val)
{
return _write_sysfs_string(filename, basedir, val, 0);
}
/**
* read_sysfs_posint() - read an integer value from file
* @filename: name of file to read from
* @basedir: the sysfs directory in which the file is to be found
*
* Returns the read integer value >= 0 on success, otherwise a negative error
* code.
**/
int read_sysfs_posint(const char *filename, const char *basedir)
{
int ret;
FILE *sysfsfp;
char *temp = malloc(strlen(basedir) + strlen(filename) + 2);
if (!temp) {
fprintf(stderr, "Memory allocation failed");
return -ENOMEM;
}
ret = sprintf(temp, "%s/%s", basedir, filename);
if (ret < 0)
goto error_free;
sysfsfp = fopen(temp, "r");
if (!sysfsfp) {
ret = -errno;
goto error_free;
}
errno = 0;
if (fscanf(sysfsfp, "%d\n", &ret) != 1) {
ret = errno ? -errno : -ENODATA;
if (fclose(sysfsfp))
perror("read_sysfs_posint(): Failed to close dir");
goto error_free;
}
if (fclose(sysfsfp))
ret = -errno;
error_free:
free(temp);
return ret;
}
/**
* read_sysfs_float() - read a float value from file
* @filename: name of file to read from
* @basedir: the sysfs directory in which the file is to be found
* @val: output the read float value
*
* Returns a value >= 0 on success, otherwise a negative error code.
**/
int read_sysfs_float(const char *filename, const char *basedir, float *val)
{
int ret = 0;
FILE *sysfsfp;
char *temp = malloc(strlen(basedir) + strlen(filename) + 2);
if (!temp) {
fprintf(stderr, "Memory allocation failed");
return -ENOMEM;
}
ret = sprintf(temp, "%s/%s", basedir, filename);
if (ret < 0)
goto error_free;
sysfsfp = fopen(temp, "r");
if (!sysfsfp) {
ret = -errno;
goto error_free;
}
errno = 0;
if (fscanf(sysfsfp, "%f\n", val) != 1) {
ret = errno ? -errno : -ENODATA;
if (fclose(sysfsfp))
perror("read_sysfs_float(): Failed to close dir");
goto error_free;
}
if (fclose(sysfsfp))
ret = -errno;
error_free:
free(temp);
return ret;
}
/**
* read_sysfs_string() - read a string from file
* @filename: name of file to read from
* @basedir: the sysfs directory in which the file is to be found
* @str: output the read string
*
* Returns a value >= 0 on success, otherwise a negative error code.
**/
int read_sysfs_string(const char *filename, const char *basedir, char *str)
{
int ret = 0;
FILE *sysfsfp;
char *temp = malloc(strlen(basedir) + strlen(filename) + 2);
if (!temp) {
fprintf(stderr, "Memory allocation failed");
return -ENOMEM;
}
ret = sprintf(temp, "%s/%s", basedir, filename);
if (ret < 0)
goto error_free;
sysfsfp = fopen(temp, "r");
if (!sysfsfp) {
ret = -errno;
goto error_free;
}
errno = 0;
if (fscanf(sysfsfp, "%s\n", str) != 1) {
ret = errno ? -errno : -ENODATA;
if (fclose(sysfsfp))
perror("read_sysfs_string(): Failed to close dir");
goto error_free;
}
if (fclose(sysfsfp))
ret = -errno;
error_free:
free(temp);
return ret;
}
| linux-master | tools/iio/iio_utils.c |
// SPDX-License-Identifier: GPL-2.0-only
/* Industrialio event test code.
*
* Copyright (c) 2011-2012 Lars-Peter Clausen <[email protected]>
*
* This program is primarily intended as an example application.
 * Listens for events from the specified IIO device and pretty prints
 * each event as it arrives.
 *
 * Usage:
 * iio_event_monitor [-a] <device_name>
*/
#include <unistd.h>
#include <stdlib.h>
#include <dirent.h>
#include <stdbool.h>
#include <stdio.h>
#include <errno.h>
#include <string.h>
#include <poll.h>
#include <fcntl.h>
#include <sys/ioctl.h>
#include "iio_utils.h"
#include <linux/iio/events.h>
#include <linux/iio/types.h>
static const char * const iio_chan_type_name_spec[] = {
[IIO_VOLTAGE] = "voltage",
[IIO_CURRENT] = "current",
[IIO_POWER] = "power",
[IIO_ACCEL] = "accel",
[IIO_ANGL_VEL] = "anglvel",
[IIO_MAGN] = "magn",
[IIO_LIGHT] = "illuminance",
[IIO_INTENSITY] = "intensity",
[IIO_PROXIMITY] = "proximity",
[IIO_TEMP] = "temp",
[IIO_INCLI] = "incli",
[IIO_ROT] = "rot",
[IIO_ANGL] = "angl",
[IIO_TIMESTAMP] = "timestamp",
[IIO_CAPACITANCE] = "capacitance",
[IIO_ALTVOLTAGE] = "altvoltage",
[IIO_CCT] = "cct",
[IIO_PRESSURE] = "pressure",
[IIO_HUMIDITYRELATIVE] = "humidityrelative",
[IIO_ACTIVITY] = "activity",
[IIO_STEPS] = "steps",
[IIO_ENERGY] = "energy",
[IIO_DISTANCE] = "distance",
[IIO_VELOCITY] = "velocity",
[IIO_CONCENTRATION] = "concentration",
[IIO_RESISTANCE] = "resistance",
[IIO_PH] = "ph",
[IIO_UVINDEX] = "uvindex",
[IIO_GRAVITY] = "gravity",
[IIO_POSITIONRELATIVE] = "positionrelative",
[IIO_PHASE] = "phase",
[IIO_MASSCONCENTRATION] = "massconcentration",
};
static const char * const iio_ev_type_text[] = {
[IIO_EV_TYPE_THRESH] = "thresh",
[IIO_EV_TYPE_MAG] = "mag",
[IIO_EV_TYPE_ROC] = "roc",
[IIO_EV_TYPE_THRESH_ADAPTIVE] = "thresh_adaptive",
[IIO_EV_TYPE_MAG_ADAPTIVE] = "mag_adaptive",
[IIO_EV_TYPE_CHANGE] = "change",
[IIO_EV_TYPE_MAG_REFERENCED] = "mag_referenced",
[IIO_EV_TYPE_GESTURE] = "gesture",
};
static const char * const iio_ev_dir_text[] = {
[IIO_EV_DIR_EITHER] = "either",
[IIO_EV_DIR_RISING] = "rising",
[IIO_EV_DIR_FALLING] = "falling",
[IIO_EV_DIR_SINGLETAP] = "singletap",
[IIO_EV_DIR_DOUBLETAP] = "doubletap",
};
static const char * const iio_modifier_names[] = {
[IIO_MOD_X] = "x",
[IIO_MOD_Y] = "y",
[IIO_MOD_Z] = "z",
[IIO_MOD_X_AND_Y] = "x&y",
[IIO_MOD_X_AND_Z] = "x&z",
[IIO_MOD_Y_AND_Z] = "y&z",
[IIO_MOD_X_AND_Y_AND_Z] = "x&y&z",
[IIO_MOD_X_OR_Y] = "x|y",
[IIO_MOD_X_OR_Z] = "x|z",
[IIO_MOD_Y_OR_Z] = "y|z",
[IIO_MOD_X_OR_Y_OR_Z] = "x|y|z",
[IIO_MOD_LIGHT_BOTH] = "both",
[IIO_MOD_LIGHT_IR] = "ir",
[IIO_MOD_ROOT_SUM_SQUARED_X_Y] = "sqrt(x^2+y^2)",
[IIO_MOD_SUM_SQUARED_X_Y_Z] = "x^2+y^2+z^2",
[IIO_MOD_LIGHT_CLEAR] = "clear",
[IIO_MOD_LIGHT_RED] = "red",
[IIO_MOD_LIGHT_GREEN] = "green",
[IIO_MOD_LIGHT_BLUE] = "blue",
[IIO_MOD_LIGHT_UV] = "uv",
[IIO_MOD_LIGHT_DUV] = "duv",
[IIO_MOD_QUATERNION] = "quaternion",
[IIO_MOD_TEMP_AMBIENT] = "ambient",
[IIO_MOD_TEMP_OBJECT] = "object",
[IIO_MOD_NORTH_MAGN] = "from_north_magnetic",
[IIO_MOD_NORTH_TRUE] = "from_north_true",
[IIO_MOD_NORTH_MAGN_TILT_COMP] = "from_north_magnetic_tilt_comp",
[IIO_MOD_NORTH_TRUE_TILT_COMP] = "from_north_true_tilt_comp",
[IIO_MOD_RUNNING] = "running",
[IIO_MOD_JOGGING] = "jogging",
[IIO_MOD_WALKING] = "walking",
[IIO_MOD_STILL] = "still",
[IIO_MOD_ROOT_SUM_SQUARED_X_Y_Z] = "sqrt(x^2+y^2+z^2)",
[IIO_MOD_I] = "i",
[IIO_MOD_Q] = "q",
[IIO_MOD_CO2] = "co2",
[IIO_MOD_ETHANOL] = "ethanol",
[IIO_MOD_H2] = "h2",
[IIO_MOD_VOC] = "voc",
[IIO_MOD_PM1] = "pm1",
[IIO_MOD_PM2P5] = "pm2p5",
[IIO_MOD_PM4] = "pm4",
[IIO_MOD_PM10] = "pm10",
[IIO_MOD_O2] = "o2",
[IIO_MOD_LINEAR_X] = "linear_x",
[IIO_MOD_LINEAR_Y] = "linear_y",
[IIO_MOD_LINEAR_Z] = "linear_z",
[IIO_MOD_PITCH] = "pitch",
[IIO_MOD_YAW] = "yaw",
[IIO_MOD_ROLL] = "roll",
};
static bool event_is_known(struct iio_event_data *event)
{
enum iio_chan_type type = IIO_EVENT_CODE_EXTRACT_CHAN_TYPE(event->id);
enum iio_modifier mod = IIO_EVENT_CODE_EXTRACT_MODIFIER(event->id);
enum iio_event_type ev_type = IIO_EVENT_CODE_EXTRACT_TYPE(event->id);
enum iio_event_direction dir = IIO_EVENT_CODE_EXTRACT_DIR(event->id);
switch (type) {
case IIO_VOLTAGE:
case IIO_CURRENT:
case IIO_POWER:
case IIO_ACCEL:
case IIO_ANGL_VEL:
case IIO_MAGN:
case IIO_LIGHT:
case IIO_INTENSITY:
case IIO_PROXIMITY:
case IIO_TEMP:
case IIO_INCLI:
case IIO_ROT:
case IIO_ANGL:
case IIO_TIMESTAMP:
case IIO_CAPACITANCE:
case IIO_ALTVOLTAGE:
case IIO_CCT:
case IIO_PRESSURE:
case IIO_HUMIDITYRELATIVE:
case IIO_ACTIVITY:
case IIO_STEPS:
case IIO_ENERGY:
case IIO_DISTANCE:
case IIO_VELOCITY:
case IIO_CONCENTRATION:
case IIO_RESISTANCE:
case IIO_PH:
case IIO_UVINDEX:
case IIO_GRAVITY:
case IIO_POSITIONRELATIVE:
case IIO_PHASE:
case IIO_MASSCONCENTRATION:
break;
default:
return false;
}
switch (mod) {
case IIO_NO_MOD:
case IIO_MOD_X:
case IIO_MOD_Y:
case IIO_MOD_Z:
case IIO_MOD_X_AND_Y:
case IIO_MOD_X_AND_Z:
case IIO_MOD_Y_AND_Z:
case IIO_MOD_X_AND_Y_AND_Z:
case IIO_MOD_X_OR_Y:
case IIO_MOD_X_OR_Z:
case IIO_MOD_Y_OR_Z:
case IIO_MOD_X_OR_Y_OR_Z:
case IIO_MOD_LIGHT_BOTH:
case IIO_MOD_LIGHT_IR:
case IIO_MOD_ROOT_SUM_SQUARED_X_Y:
case IIO_MOD_SUM_SQUARED_X_Y_Z:
case IIO_MOD_LIGHT_CLEAR:
case IIO_MOD_LIGHT_RED:
case IIO_MOD_LIGHT_GREEN:
case IIO_MOD_LIGHT_BLUE:
case IIO_MOD_LIGHT_UV:
case IIO_MOD_LIGHT_DUV:
case IIO_MOD_QUATERNION:
case IIO_MOD_TEMP_AMBIENT:
case IIO_MOD_TEMP_OBJECT:
case IIO_MOD_NORTH_MAGN:
case IIO_MOD_NORTH_TRUE:
case IIO_MOD_NORTH_MAGN_TILT_COMP:
case IIO_MOD_NORTH_TRUE_TILT_COMP:
case IIO_MOD_RUNNING:
case IIO_MOD_JOGGING:
case IIO_MOD_WALKING:
case IIO_MOD_STILL:
case IIO_MOD_ROOT_SUM_SQUARED_X_Y_Z:
case IIO_MOD_I:
case IIO_MOD_Q:
case IIO_MOD_CO2:
case IIO_MOD_ETHANOL:
case IIO_MOD_H2:
case IIO_MOD_VOC:
case IIO_MOD_PM1:
case IIO_MOD_PM2P5:
case IIO_MOD_PM4:
case IIO_MOD_PM10:
case IIO_MOD_O2:
break;
default:
return false;
}
switch (ev_type) {
case IIO_EV_TYPE_THRESH:
case IIO_EV_TYPE_MAG:
case IIO_EV_TYPE_ROC:
case IIO_EV_TYPE_THRESH_ADAPTIVE:
case IIO_EV_TYPE_MAG_ADAPTIVE:
case IIO_EV_TYPE_CHANGE:
case IIO_EV_TYPE_GESTURE:
break;
default:
return false;
}
switch (dir) {
case IIO_EV_DIR_EITHER:
case IIO_EV_DIR_RISING:
case IIO_EV_DIR_FALLING:
case IIO_EV_DIR_SINGLETAP:
case IIO_EV_DIR_DOUBLETAP:
case IIO_EV_DIR_NONE:
break;
default:
return false;
}
return true;
}
static void print_event(struct iio_event_data *event)
{
enum iio_chan_type type = IIO_EVENT_CODE_EXTRACT_CHAN_TYPE(event->id);
enum iio_modifier mod = IIO_EVENT_CODE_EXTRACT_MODIFIER(event->id);
enum iio_event_type ev_type = IIO_EVENT_CODE_EXTRACT_TYPE(event->id);
enum iio_event_direction dir = IIO_EVENT_CODE_EXTRACT_DIR(event->id);
int chan = IIO_EVENT_CODE_EXTRACT_CHAN(event->id);
int chan2 = IIO_EVENT_CODE_EXTRACT_CHAN2(event->id);
bool diff = IIO_EVENT_CODE_EXTRACT_DIFF(event->id);
if (!event_is_known(event)) {
fprintf(stderr, "Unknown event: time: %lld, id: %llx\n",
event->timestamp, event->id);
return;
}
printf("Event: time: %lld, type: %s", event->timestamp,
iio_chan_type_name_spec[type]);
if (mod != IIO_NO_MOD)
printf("(%s)", iio_modifier_names[mod]);
if (chan >= 0) {
printf(", channel: %d", chan);
if (diff && chan2 >= 0)
printf("-%d", chan2);
}
printf(", evtype: %s", iio_ev_type_text[ev_type]);
if (dir != IIO_EV_DIR_NONE)
printf(", direction: %s", iio_ev_dir_text[dir]);
printf("\n");
fflush(stdout);
}
/* Enable or disable events in sysfs if the knob is available */
static void enable_events(char *dev_dir, int enable)
{
const struct dirent *ent;
char evdir[256];
int ret;
DIR *dp;
snprintf(evdir, sizeof(evdir), FORMAT_EVENTS_DIR, dev_dir);
evdir[sizeof(evdir)-1] = '\0';
dp = opendir(evdir);
if (!dp) {
fprintf(stderr, "Enabling/disabling events: can't open %s\n",
evdir);
return;
}
while (ent = readdir(dp), ent) {
if (iioutils_check_suffix(ent->d_name, "_en")) {
printf("%sabling: %s\n",
enable ? "En" : "Dis",
ent->d_name);
ret = write_sysfs_int(ent->d_name, evdir,
enable);
if (ret < 0)
fprintf(stderr, "Failed to enable/disable %s\n",
ent->d_name);
}
}
if (closedir(dp) == -1) {
perror("Enabling/disabling channels: "
"Failed to close directory");
return;
}
}
int main(int argc, char **argv)
{
struct iio_event_data event;
const char *device_name;
char *dev_dir_name = NULL;
char *chrdev_name;
int ret;
int dev_num;
int fd, event_fd;
bool all_events = false;
if (argc == 2) {
device_name = argv[1];
} else if (argc == 3) {
device_name = argv[2];
if (!strcmp(argv[1], "-a"))
all_events = true;
} else {
fprintf(stderr,
"Usage: iio_event_monitor [options] <device_name>\n"
"Listen and display events from IIO devices\n"
" -a Auto-activate all available events\n");
return -1;
}
dev_num = find_type_by_name(device_name, "iio:device");
if (dev_num >= 0) {
printf("Found IIO device with name %s with device number %d\n",
device_name, dev_num);
ret = asprintf(&chrdev_name, "/dev/iio:device%d", dev_num);
if (ret < 0)
return -ENOMEM;
/* Look up sysfs dir as well if we can */
ret = asprintf(&dev_dir_name, "%siio:device%d", iio_dir, dev_num);
if (ret < 0)
return -ENOMEM;
} else {
/*
* If we can't find an IIO device by name assume device_name is
* an IIO chrdev
*/
chrdev_name = strdup(device_name);
if (!chrdev_name)
return -ENOMEM;
}
if (all_events && dev_dir_name)
enable_events(dev_dir_name, 1);
fd = open(chrdev_name, 0);
if (fd == -1) {
ret = -errno;
fprintf(stderr, "Failed to open %s\n", chrdev_name);
goto error_free_chrdev_name;
}
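	/* The character device is only needed long enough to request the
	 * event fd and is closed again right after the ioctl.
	 */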
ret = ioctl(fd, IIO_GET_EVENT_FD_IOCTL, &event_fd);
if (ret == -1 || event_fd == -1) {
ret = -errno;
if (ret == -ENODEV)
fprintf(stderr,
"This device does not support events\n");
else
fprintf(stderr, "Failed to retrieve event fd\n");
if (close(fd) == -1)
perror("Failed to close character device file");
goto error_free_chrdev_name;
}
if (close(fd) == -1) {
ret = -errno;
goto error_free_chrdev_name;
}
while (true) {
ret = read(event_fd, &event, sizeof(event));
if (ret == -1) {
if (errno == EAGAIN) {
fprintf(stderr, "nothing available\n");
continue;
} else {
ret = -errno;
perror("Failed to read event from device");
break;
}
}
if (ret != sizeof(event)) {
fprintf(stderr, "Reading event failed!\n");
ret = -EIO;
break;
}
print_event(&event);
}
if (close(event_fd) == -1)
perror("Failed to close event file");
error_free_chrdev_name:
/* Disable events after use */
if (all_events && dev_dir_name)
enable_events(dev_dir_name, 0);
free(chrdev_name);
return ret;
}
| linux-master | tools/iio/iio_event_monitor.c |
// SPDX-License-Identifier: GPL-2.0-only
/* Industrialio buffer test code.
*
* Copyright (c) 2008 Jonathan Cameron
*
* This program is primarily intended as an example application.
* Reads the current buffer setup from sysfs and starts a short capture
* from the specified device, pretty printing the result after appropriate
* conversion.
*
* Command line parameters
* generic_buffer -n <device_name> -t <trigger_name>
* If trigger name is not specified the program assumes you want a dataready
* trigger associated with the device and goes looking for it.
*/
#include <unistd.h>
#include <stdlib.h>
#include <dirent.h>
#include <fcntl.h>
#include <stdio.h>
#include <errno.h>
#include <sys/stat.h>
#include <sys/dir.h>
#include <linux/types.h>
#include <string.h>
#include <poll.h>
#include <endian.h>
#include <getopt.h>
#include <inttypes.h>
#include <stdbool.h>
#include <signal.h>
#include <sys/ioctl.h>
#include <linux/iio/buffer.h>
#include "iio_utils.h"
/**
* enum autochan - state for the automatic channel enabling mechanism
*/
enum autochan {
AUTOCHANNELS_DISABLED,
AUTOCHANNELS_ENABLED,
AUTOCHANNELS_ACTIVE,
};
/**
* size_from_channelarray() - calculate the storage size of a scan
* @channels: the channel info array
* @num_channels: number of channels
*
* Has the side effect of filling the channels[i].location values used
* in processing the buffer output.
**/
static unsigned int size_from_channelarray(struct iio_channel_info *channels, int num_channels)
{
unsigned int bytes = 0;
int i = 0;
while (i < num_channels) {
if (bytes % channels[i].bytes == 0)
channels[i].location = bytes;
else
channels[i].location = bytes - bytes % channels[i].bytes
+ channels[i].bytes;
bytes = channels[i].location + channels[i].bytes;
i++;
}
return bytes;
}
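/*
 * The printNbyte() helpers below sign-extend a bits_used-wide field by
 * shifting it up so its sign bit becomes the storage MSB and then
 * shifting back down arithmetically. For example, a signed 12-bit
 * sample 0xfff in 16-bit storage: 0xfff << 4 = 0xfff0, and
 * (int16_t)0xfff0 >> 4 = -1.
 */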
static void print1byte(uint8_t input, struct iio_channel_info *info)
{
/*
* Shift before conversion to avoid sign extension
* of left aligned data
*/
input >>= info->shift;
input &= info->mask;
if (info->is_signed) {
int8_t val = (int8_t)(input << (8 - info->bits_used)) >>
(8 - info->bits_used);
printf("%05f ", ((float)val + info->offset) * info->scale);
} else {
printf("%05f ", ((float)input + info->offset) * info->scale);
}
}
static void print2byte(uint16_t input, struct iio_channel_info *info)
{
/* First swap if incorrect endian */
if (info->be)
input = be16toh(input);
else
input = le16toh(input);
/*
* Shift before conversion to avoid sign extension
* of left aligned data
*/
input >>= info->shift;
input &= info->mask;
if (info->is_signed) {
int16_t val = (int16_t)(input << (16 - info->bits_used)) >>
(16 - info->bits_used);
printf("%05f ", ((float)val + info->offset) * info->scale);
} else {
printf("%05f ", ((float)input + info->offset) * info->scale);
}
}
static void print4byte(uint32_t input, struct iio_channel_info *info)
{
/* First swap if incorrect endian */
if (info->be)
input = be32toh(input);
else
input = le32toh(input);
/*
* Shift before conversion to avoid sign extension
* of left aligned data
*/
input >>= info->shift;
input &= info->mask;
if (info->is_signed) {
int32_t val = (int32_t)(input << (32 - info->bits_used)) >>
(32 - info->bits_used);
printf("%05f ", ((float)val + info->offset) * info->scale);
} else {
printf("%05f ", ((float)input + info->offset) * info->scale);
}
}
static void print8byte(uint64_t input, struct iio_channel_info *info)
{
/* First swap if incorrect endian */
if (info->be)
input = be64toh(input);
else
input = le64toh(input);
/*
* Shift before conversion to avoid sign extension
* of left aligned data
*/
input >>= info->shift;
input &= info->mask;
if (info->is_signed) {
int64_t val = (int64_t)(input << (64 - info->bits_used)) >>
(64 - info->bits_used);
/* special case for timestamp */
if (info->scale == 1.0f && info->offset == 0.0f)
printf("%" PRId64 " ", val);
else
printf("%05f ",
((float)val + info->offset) * info->scale);
} else {
printf("%05f ", ((float)input + info->offset) * info->scale);
}
}
/**
* process_scan() - print out the values in SI units
* @data: pointer to the start of the scan
* @channels: information about the channels.
* Note: size_from_channelarray must have been called first
* to fill the location offsets.
* @num_channels: number of channels
**/
static void process_scan(char *data, struct iio_channel_info *channels,
int num_channels)
{
int k;
for (k = 0; k < num_channels; k++)
switch (channels[k].bytes) {
/* only a few cases implemented so far */
case 1:
print1byte(*(uint8_t *)(data + channels[k].location),
&channels[k]);
break;
case 2:
print2byte(*(uint16_t *)(data + channels[k].location),
&channels[k]);
break;
case 4:
print4byte(*(uint32_t *)(data + channels[k].location),
&channels[k]);
break;
case 8:
print8byte(*(uint64_t *)(data + channels[k].location),
&channels[k]);
break;
default:
break;
}
printf("\n");
}
static int enable_disable_all_channels(char *dev_dir_name, int buffer_idx, int enable)
{
const struct dirent *ent;
char scanelemdir[256];
DIR *dp;
int ret;
snprintf(scanelemdir, sizeof(scanelemdir),
FORMAT_SCAN_ELEMENTS_DIR, dev_dir_name, buffer_idx);
scanelemdir[sizeof(scanelemdir)-1] = '\0';
dp = opendir(scanelemdir);
if (!dp) {
fprintf(stderr, "Enabling/disabling channels: can't open %s\n",
scanelemdir);
return -EIO;
}
ret = -ENOENT;
while (ent = readdir(dp), ent) {
if (iioutils_check_suffix(ent->d_name, "_en")) {
printf("%sabling: %s\n",
enable ? "En" : "Dis",
ent->d_name);
ret = write_sysfs_int(ent->d_name, scanelemdir,
enable);
if (ret < 0)
fprintf(stderr, "Failed to enable/disable %s\n",
ent->d_name);
}
}
if (closedir(dp) == -1) {
perror("Enabling/disabling channels: "
"Failed to close directory");
return -errno;
}
return 0;
}
static void print_usage(void)
{
fprintf(stderr, "Usage: generic_buffer [options]...\n"
"Capture, convert and output data from IIO device buffer\n"
" -a Auto-activate all available channels\n"
" -A Force-activate ALL channels\n"
" -b <n> The buffer which to open (by index), default 0\n"
" -c <n> Do n conversions, or loop forever if n < 0\n"
" -e Disable wait for event (new data)\n"
" -g Use trigger-less mode\n"
" -l <n> Set buffer length to n samples\n"
" --device-name -n <name>\n"
" --device-num -N <num>\n"
" Set device by name or number (mandatory)\n"
" --trigger-name -t <name>\n"
" --trigger-num -T <num>\n"
" Set trigger by name or number\n"
" -w <n> Set delay between reads in us (event-less mode)\n");
}
static enum autochan autochannels = AUTOCHANNELS_DISABLED;
static char *dev_dir_name = NULL;
static char *buf_dir_name = NULL;
static int buffer_idx = 0;
static bool current_trigger_set = false;
static void cleanup(void)
{
int ret;
/* Disable trigger */
if (dev_dir_name && current_trigger_set) {
/* Disconnect the trigger - just write a dummy name. */
ret = write_sysfs_string("trigger/current_trigger",
dev_dir_name, "NULL");
if (ret < 0)
fprintf(stderr, "Failed to disable trigger: %s\n",
strerror(-ret));
current_trigger_set = false;
}
/* Disable buffer */
if (buf_dir_name) {
ret = write_sysfs_int("enable", buf_dir_name, 0);
if (ret < 0)
fprintf(stderr, "Failed to disable buffer: %s\n",
strerror(-ret));
}
/* Disable channels if auto-enabled */
if (dev_dir_name && autochannels == AUTOCHANNELS_ACTIVE) {
ret = enable_disable_all_channels(dev_dir_name, buffer_idx, 0);
if (ret)
fprintf(stderr, "Failed to disable all channels\n");
autochannels = AUTOCHANNELS_DISABLED;
}
}
static void sig_handler(int signum)
{
fprintf(stderr, "Caught signal %d\n", signum);
cleanup();
exit(-signum);
}
static void register_cleanup(void)
{
struct sigaction sa = { .sa_handler = sig_handler };
const int signums[] = { SIGINT, SIGTERM, SIGABRT };
int ret, i;
for (i = 0; i < ARRAY_SIZE(signums); ++i) {
ret = sigaction(signums[i], &sa, NULL);
if (ret) {
perror("Failed to register signal handler");
exit(-1);
}
}
}
static const struct option longopts[] = {
{ "device-name", 1, 0, 'n' },
{ "device-num", 1, 0, 'N' },
{ "trigger-name", 1, 0, 't' },
{ "trigger-num", 1, 0, 'T' },
{ },
};
int main(int argc, char **argv)
{
long long num_loops = 2;
unsigned long timedelay = 1000000;
unsigned long buf_len = 128;
ssize_t i;
unsigned long long j;
unsigned long toread;
int ret, c;
struct stat st;
int fd = -1;
int buf_fd = -1;
int num_channels = 0;
char *trigger_name = NULL, *device_name = NULL;
char *data = NULL;
ssize_t read_size;
int dev_num = -1, trig_num = -1;
char *buffer_access = NULL;
unsigned int scan_size;
int noevents = 0;
int notrigger = 0;
char *dummy;
bool force_autochannels = false;
struct iio_channel_info *channels = NULL;
register_cleanup();
while ((c = getopt_long(argc, argv, "aAb:c:egl:n:N:t:T:w:?", longopts,
NULL)) != -1) {
switch (c) {
case 'a':
autochannels = AUTOCHANNELS_ENABLED;
break;
case 'A':
autochannels = AUTOCHANNELS_ENABLED;
force_autochannels = true;
break;
case 'b':
errno = 0;
buffer_idx = strtoll(optarg, &dummy, 10);
if (errno) {
ret = -errno;
goto error;
}
if (buffer_idx < 0) {
ret = -ERANGE;
goto error;
}
break;
case 'c':
errno = 0;
num_loops = strtoll(optarg, &dummy, 10);
if (errno) {
ret = -errno;
goto error;
}
break;
case 'e':
noevents = 1;
break;
case 'g':
notrigger = 1;
break;
case 'l':
errno = 0;
buf_len = strtoul(optarg, &dummy, 10);
if (errno) {
ret = -errno;
goto error;
}
break;
case 'n':
device_name = strdup(optarg);
break;
case 'N':
errno = 0;
dev_num = strtoul(optarg, &dummy, 10);
if (errno) {
ret = -errno;
goto error;
}
break;
case 't':
trigger_name = strdup(optarg);
break;
case 'T':
errno = 0;
trig_num = strtoul(optarg, &dummy, 10);
if (errno) {
ret = -errno;
goto error;
}
break;
case 'w':
errno = 0;
timedelay = strtoul(optarg, &dummy, 10);
if (errno) {
ret = -errno;
goto error;
}
break;
case '?':
print_usage();
ret = -1;
goto error;
}
}
/* Find the device requested */
if (dev_num < 0 && !device_name) {
fprintf(stderr, "Device not set\n");
print_usage();
ret = -1;
goto error;
} else if (dev_num >= 0 && device_name) {
fprintf(stderr, "Only one of --device-num or --device-name needs to be set\n");
print_usage();
ret = -1;
goto error;
} else if (dev_num < 0) {
dev_num = find_type_by_name(device_name, "iio:device");
if (dev_num < 0) {
fprintf(stderr, "Failed to find the %s\n", device_name);
ret = dev_num;
goto error;
}
}
printf("iio device number being used is %d\n", dev_num);
ret = asprintf(&dev_dir_name, "%siio:device%d", iio_dir, dev_num);
if (ret < 0)
return -ENOMEM;
/* Fetch device_name if specified by number */
if (!device_name) {
device_name = malloc(IIO_MAX_NAME_LENGTH);
if (!device_name) {
ret = -ENOMEM;
goto error;
}
ret = read_sysfs_string("name", dev_dir_name, device_name);
if (ret < 0) {
fprintf(stderr, "Failed to read name of device %d\n", dev_num);
goto error;
}
}
if (notrigger) {
printf("trigger-less mode selected\n");
} else if (trig_num >= 0) {
char *trig_dev_name;
ret = asprintf(&trig_dev_name, "%strigger%d", iio_dir, trig_num);
if (ret < 0) {
ret = -ENOMEM;
goto error;
}
trigger_name = malloc(IIO_MAX_NAME_LENGTH);
if (!trigger_name) {
free(trig_dev_name);
ret = -ENOMEM;
goto error;
}
ret = read_sysfs_string("name", trig_dev_name, trigger_name);
free(trig_dev_name);
if (ret < 0) {
fprintf(stderr, "Failed to read trigger%d name\n", trig_num);
goto error;
}
printf("iio trigger number being used is %d\n", trig_num);
} else {
if (!trigger_name) {
/*
* Build the trigger name. If it is device associated
* its name is <device_name>-dev[n] where n matches
* the device number found above.
*/
ret = asprintf(&trigger_name,
"%s-dev%d", device_name, dev_num);
if (ret < 0) {
ret = -ENOMEM;
goto error;
}
}
/* Look for this "-devN" trigger */
trig_num = find_type_by_name(trigger_name, "trigger");
if (trig_num < 0) {
/* OK try the simpler "-trigger" suffix instead */
free(trigger_name);
ret = asprintf(&trigger_name,
"%s-trigger", device_name);
if (ret < 0) {
ret = -ENOMEM;
goto error;
}
}
trig_num = find_type_by_name(trigger_name, "trigger");
if (trig_num < 0) {
fprintf(stderr, "Failed to find the trigger %s\n",
trigger_name);
ret = trig_num;
goto error;
}
printf("iio trigger number being used is %d\n", trig_num);
}
/*
* Parse the files in scan_elements to identify what channels are
* present
*/
ret = build_channel_array(dev_dir_name, buffer_idx, &channels, &num_channels);
if (ret) {
fprintf(stderr, "Problem reading scan element information\n"
"diag %s\n", dev_dir_name);
goto error;
}
if (num_channels && autochannels == AUTOCHANNELS_ENABLED &&
!force_autochannels) {
fprintf(stderr, "Auto-channels selected but some channels "
"are already activated in sysfs\n");
fprintf(stderr, "Proceeding without activating any channels\n");
}
if ((!num_channels && autochannels == AUTOCHANNELS_ENABLED) ||
(autochannels == AUTOCHANNELS_ENABLED && force_autochannels)) {
fprintf(stderr, "Enabling all channels\n");
ret = enable_disable_all_channels(dev_dir_name, buffer_idx, 1);
if (ret) {
fprintf(stderr, "Failed to enable all channels\n");
goto error;
}
/* This flags that we need to disable the channels again */
autochannels = AUTOCHANNELS_ACTIVE;
ret = build_channel_array(dev_dir_name, buffer_idx, &channels,
&num_channels);
if (ret) {
fprintf(stderr, "Problem reading scan element "
"information\n"
"diag %s\n", dev_dir_name);
goto error;
}
if (!num_channels) {
fprintf(stderr, "Still no channels after "
"auto-enabling, giving up\n");
goto error;
}
}
if (!num_channels && autochannels == AUTOCHANNELS_DISABLED) {
fprintf(stderr,
"No channels are enabled, we have nothing to scan.\n");
fprintf(stderr, "Enable channels manually in "
FORMAT_SCAN_ELEMENTS_DIR
"/*_en or pass -a to autoenable channels and "
"try again.\n", dev_dir_name, buffer_idx);
ret = -ENOENT;
goto error;
}
/*
* Construct the directory name for the associated buffer.
* Since we know the lis3l02dq has only one buffer, this may
* be built rather than found.
*/
ret = asprintf(&buf_dir_name,
"%siio:device%d/buffer%d", iio_dir, dev_num, buffer_idx);
if (ret < 0) {
ret = -ENOMEM;
goto error;
}
if (stat(buf_dir_name, &st)) {
fprintf(stderr, "Could not stat() '%s', got error %d: %s\n",
buf_dir_name, errno, strerror(errno));
ret = -errno;
goto error;
}
if (!S_ISDIR(st.st_mode)) {
fprintf(stderr, "File '%s' is not a directory\n", buf_dir_name);
ret = -EFAULT;
goto error;
}
if (!notrigger) {
printf("%s %s\n", dev_dir_name, trigger_name);
/*
* Set the device trigger to be the data ready trigger found
* above
*/
ret = write_sysfs_string_and_verify("trigger/current_trigger",
dev_dir_name,
trigger_name);
if (ret < 0) {
fprintf(stderr,
"Failed to write current_trigger file\n");
goto error;
}
}
ret = asprintf(&buffer_access, "/dev/iio:device%d", dev_num);
if (ret < 0) {
ret = -ENOMEM;
goto error;
}
/* Attempt to open non blocking the access dev */
fd = open(buffer_access, O_RDONLY | O_NONBLOCK);
if (fd == -1) { /* TODO: If it isn't there make the node */
ret = -errno;
fprintf(stderr, "Failed to open %s\n", buffer_access);
goto error;
}
/* specify for which buffer index we want an FD */
buf_fd = buffer_idx;
ret = ioctl(fd, IIO_BUFFER_GET_FD_IOCTL, &buf_fd);
if (ret == -1 || buf_fd == -1) {
ret = -errno;
if (ret == -ENODEV || ret == -EINVAL)
fprintf(stderr,
"Device does not have this many buffers\n");
else
fprintf(stderr, "Failed to retrieve buffer fd\n");
goto error;
}
/* Setup ring buffer parameters */
ret = write_sysfs_int("length", buf_dir_name, buf_len);
if (ret < 0)
goto error;
/* Enable the buffer */
ret = write_sysfs_int("enable", buf_dir_name, 1);
if (ret < 0) {
fprintf(stderr,
"Failed to enable buffer '%s': %s\n",
buf_dir_name, strerror(-ret));
goto error;
}
scan_size = size_from_channelarray(channels, num_channels);
size_t total_buf_len = scan_size * buf_len;
if (scan_size > 0 && total_buf_len / scan_size != buf_len) {
ret = -EFAULT;
perror("Integer overflow happened when calculate scan_size * buf_len");
goto error;
}
data = malloc(total_buf_len);
if (!data) {
ret = -ENOMEM;
goto error;
}
/*
* This check is being done here for sanity reasons, however it
* should be omitted under normal operation.
* If this is buffer0, we check that we get EBUSY after this point.
*/
if (buffer_idx == 0) {
errno = 0;
read_size = read(fd, data, 1);
if (read_size > -1 || errno != EBUSY) {
ret = -EFAULT;
perror("Reading from '%s' should not be possible after ioctl()");
goto error;
}
}
/* close now the main chardev FD and let the buffer FD work */
if (close(fd) == -1)
perror("Failed to close character device file");
fd = -1;
for (j = 0; j < num_loops || num_loops < 0; j++) {
if (!noevents) {
struct pollfd pfd = {
.fd = buf_fd,
.events = POLLIN,
};
ret = poll(&pfd, 1, -1);
if (ret < 0) {
ret = -errno;
goto error;
} else if (ret == 0) {
continue;
}
} else {
usleep(timedelay);
}
toread = buf_len;
read_size = read(buf_fd, data, toread * scan_size);
if (read_size < 0) {
if (errno == EAGAIN) {
fprintf(stderr, "nothing available\n");
continue;
} else {
break;
}
}
for (i = 0; i < read_size / scan_size; i++)
process_scan(data + scan_size * i, channels,
num_channels);
}
error:
cleanup();
if (fd >= 0 && close(fd) == -1)
perror("Failed to close character device");
if (buf_fd >= 0 && close(buf_fd) == -1)
perror("Failed to close buffer");
free(buffer_access);
free(data);
free(buf_dir_name);
for (i = num_channels - 1; i >= 0; i--) {
free(channels[i].name);
free(channels[i].generic_name);
}
free(channels);
free(trigger_name);
free(device_name);
free(dev_dir_name);
return ret;
}
| linux-master | tools/iio/iio_generic_buffer.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Industrial I/O utilities - lsiio.c
*
* Copyright (c) 2010 Manuel Stahl <[email protected]>
*/
#include <string.h>
#include <dirent.h>
#include <stdio.h>
#include <errno.h>
#include <stdint.h>
#include <stdlib.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <sys/dir.h>
#include "iio_utils.h"
static enum verbosity {
VERBLEVEL_DEFAULT, /* 0 gives lspci behaviour */
VERBLEVEL_SENSORS, /* 1 lists sensors */
} verblevel = VERBLEVEL_DEFAULT;
const char *type_device = "iio:device";
const char *type_trigger = "trigger";
static inline int check_prefix(const char *str, const char *prefix)
{
return strlen(str) > strlen(prefix) &&
strncmp(str, prefix, strlen(prefix)) == 0;
}
static inline int check_postfix(const char *str, const char *postfix)
{
return strlen(str) > strlen(postfix) &&
strcmp(str + strlen(str) - strlen(postfix), postfix) == 0;
}
static int dump_channels(const char *dev_dir_name)
{
DIR *dp;
const struct dirent *ent;
dp = opendir(dev_dir_name);
if (!dp)
return -errno;
while (ent = readdir(dp), ent)
if (check_prefix(ent->d_name, "in_") &&
(check_postfix(ent->d_name, "_raw") ||
check_postfix(ent->d_name, "_input")))
printf(" %-10s\n", ent->d_name);
return (closedir(dp) == -1) ? -errno : 0;
}
static int dump_one_device(const char *dev_dir_name)
{
char name[IIO_MAX_NAME_LENGTH];
int dev_idx;
int ret;
ret = sscanf(dev_dir_name + strlen(iio_dir) + strlen(type_device), "%i",
&dev_idx);
if (ret != 1)
return -EINVAL;
ret = read_sysfs_string("name", dev_dir_name, name);
if (ret < 0)
return ret;
printf("Device %03d: %s\n", dev_idx, name);
if (verblevel >= VERBLEVEL_SENSORS)
return dump_channels(dev_dir_name);
return 0;
}
static int dump_one_trigger(const char *dev_dir_name)
{
char name[IIO_MAX_NAME_LENGTH];
int dev_idx;
int ret;
ret = sscanf(dev_dir_name + strlen(iio_dir) + strlen(type_trigger),
"%i", &dev_idx);
if (ret != 1)
return -EINVAL;
ret = read_sysfs_string("name", dev_dir_name, name);
if (ret < 0)
return ret;
printf("Trigger %03d: %s\n", dev_idx, name);
return 0;
}
static int dump_devices(void)
{
const struct dirent *ent;
int ret;
DIR *dp;
dp = opendir(iio_dir);
if (!dp) {
fprintf(stderr, "No industrial I/O devices available\n");
return -ENODEV;
}
while (ent = readdir(dp), ent) {
if (check_prefix(ent->d_name, type_device)) {
char *dev_dir_name;
if (asprintf(&dev_dir_name, "%s%s", iio_dir,
ent->d_name) < 0) {
ret = -ENOMEM;
goto error_close_dir;
}
ret = dump_one_device(dev_dir_name);
if (ret) {
free(dev_dir_name);
goto error_close_dir;
}
free(dev_dir_name);
if (verblevel >= VERBLEVEL_SENSORS)
printf("\n");
}
}
rewinddir(dp);
while (ent = readdir(dp), ent) {
if (check_prefix(ent->d_name, type_trigger)) {
char *dev_dir_name;
if (asprintf(&dev_dir_name, "%s%s", iio_dir,
ent->d_name) < 0) {
ret = -ENOMEM;
goto error_close_dir;
}
ret = dump_one_trigger(dev_dir_name);
if (ret) {
free(dev_dir_name);
goto error_close_dir;
}
free(dev_dir_name);
}
}
return (closedir(dp) == -1) ? -errno : 0;
error_close_dir:
if (closedir(dp) == -1)
perror("dump_devices(): Failed to close directory");
return ret;
}
int main(int argc, char **argv)
{
int c, err = 0;
while ((c = getopt(argc, argv, "v")) != EOF) {
switch (c) {
case 'v':
verblevel++;
break;
case '?':
default:
err++;
break;
}
}
if (err || argc > optind) {
fprintf(stderr, "Usage: lsiio [options]...\n"
"List industrial I/O devices\n"
" -v Increase verbosity (may be given multiple times)\n");
exit(1);
}
return dump_devices();
}
| linux-master | tools/iio/lsiio.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* gpio-event-mon - monitor GPIO line events from userspace
*
* Copyright (C) 2016 Linus Walleij
*
* Usage:
* gpio-event-mon -n <device-name> -o <offset>
*/
#include <unistd.h>
#include <stdlib.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <dirent.h>
#include <errno.h>
#include <string.h>
#include <poll.h>
#include <fcntl.h>
#include <getopt.h>
#include <inttypes.h>
#include <sys/ioctl.h>
#include <sys/types.h>
#include <linux/gpio.h>
#include "gpio-utils.h"
int monitor_device(const char *device_name,
unsigned int *lines,
unsigned int num_lines,
struct gpio_v2_line_config *config,
unsigned int loops)
{
struct gpio_v2_line_values values;
char *chrdev_name;
int cfd, lfd;
int ret;
int i = 0;
ret = asprintf(&chrdev_name, "/dev/%s", device_name);
if (ret < 0)
return -ENOMEM;
cfd = open(chrdev_name, 0);
if (cfd == -1) {
ret = -errno;
fprintf(stderr, "Failed to open %s\n", chrdev_name);
goto exit_free_name;
}
ret = gpiotools_request_line(device_name, lines, num_lines, config,
"gpio-event-mon");
if (ret < 0)
goto exit_device_close;
else
lfd = ret;
/* Read initial states */
values.mask = 0;
values.bits = 0;
for (i = 0; i < num_lines; i++)
gpiotools_set_bit(&values.mask, i);
ret = gpiotools_get_values(lfd, &values);
if (ret < 0) {
fprintf(stderr,
"Failed to issue GPIO LINE GET VALUES IOCTL (%d)\n",
ret);
goto exit_line_close;
}
if (num_lines == 1) {
fprintf(stdout, "Monitoring line %d on %s\n", lines[0], device_name);
fprintf(stdout, "Initial line value: %d\n",
gpiotools_test_bit(values.bits, 0));
} else {
fprintf(stdout, "Monitoring lines %d", lines[0]);
for (i = 1; i < num_lines - 1; i++)
fprintf(stdout, ", %d", lines[i]);
fprintf(stdout, " and %d on %s\n", lines[i], device_name);
fprintf(stdout, "Initial line values: %d",
gpiotools_test_bit(values.bits, 0));
for (i = 1; i < num_lines - 1; i++)
fprintf(stdout, ", %d",
gpiotools_test_bit(values.bits, i));
fprintf(stdout, " and %d\n",
gpiotools_test_bit(values.bits, i));
}
i = 0;
while (1) {
struct gpio_v2_line_event event;
ret = read(lfd, &event, sizeof(event));
if (ret == -1) {
if (errno == EAGAIN) {
fprintf(stderr, "nothing available\n");
continue;
} else {
ret = -errno;
fprintf(stderr, "Failed to read event (%d)\n",
ret);
break;
}
}
if (ret != sizeof(event)) {
fprintf(stderr, "Reading event failed\n");
ret = -EIO;
break;
}
fprintf(stdout, "GPIO EVENT at %" PRIu64 " on line %d (%d|%d) ",
(uint64_t)event.timestamp_ns, event.offset, event.line_seqno,
event.seqno);
switch (event.id) {
case GPIO_V2_LINE_EVENT_RISING_EDGE:
fprintf(stdout, "rising edge");
break;
case GPIO_V2_LINE_EVENT_FALLING_EDGE:
fprintf(stdout, "falling edge");
break;
default:
fprintf(stdout, "unknown event");
}
fprintf(stdout, "\n");
i++;
if (i == loops)
break;
}
exit_line_close:
if (close(lfd) == -1)
perror("Failed to close line file");
exit_device_close:
if (close(cfd) == -1)
perror("Failed to close GPIO character device file");
exit_free_name:
free(chrdev_name);
return ret;
}
void print_usage(void)
{
fprintf(stderr, "Usage: gpio-event-mon [options]...\n"
"Listen to events on GPIO lines, 0->1 1->0\n"
" -n <name> Listen on GPIOs on a named device (must be stated)\n"
" -o <n> Offset of line to monitor (may be repeated)\n"
" -d Set line as open drain\n"
" -s Set line as open source\n"
" -r Listen for rising edges\n"
" -f Listen for falling edges\n"
" -w Report the wall-clock time for events\n"
" -t Report the hardware timestamp for events\n"
" -b <n> Debounce the line with period n microseconds\n"
" [-c <n>] Do <n> loops (optional, infinite loop if not stated)\n"
" -? This helptext\n"
"\n"
"Example:\n"
"gpio-event-mon -n gpiochip0 -o 4 -r -f -b 10000\n"
);
}
#define EDGE_FLAGS \
(GPIO_V2_LINE_FLAG_EDGE_RISING | \
GPIO_V2_LINE_FLAG_EDGE_FALLING)
int main(int argc, char **argv)
{
const char *device_name = NULL;
unsigned int lines[GPIO_V2_LINES_MAX];
unsigned int num_lines = 0;
unsigned int loops = 0;
struct gpio_v2_line_config config;
int c, attr, i;
unsigned long debounce_period_us = 0;
memset(&config, 0, sizeof(config));
config.flags = GPIO_V2_LINE_FLAG_INPUT;
while ((c = getopt(argc, argv, "c:n:o:b:dsrfwt?")) != -1) {
switch (c) {
case 'c':
loops = strtoul(optarg, NULL, 10);
break;
case 'n':
device_name = optarg;
break;
case 'o':
if (num_lines >= GPIO_V2_LINES_MAX) {
print_usage();
return -1;
}
lines[num_lines] = strtoul(optarg, NULL, 10);
num_lines++;
break;
case 'b':
debounce_period_us = strtoul(optarg, NULL, 10);
break;
case 'd':
config.flags |= GPIO_V2_LINE_FLAG_OPEN_DRAIN;
break;
case 's':
config.flags |= GPIO_V2_LINE_FLAG_OPEN_SOURCE;
break;
case 'r':
config.flags |= GPIO_V2_LINE_FLAG_EDGE_RISING;
break;
case 'f':
config.flags |= GPIO_V2_LINE_FLAG_EDGE_FALLING;
break;
case 'w':
config.flags |= GPIO_V2_LINE_FLAG_EVENT_CLOCK_REALTIME;
break;
case 't':
config.flags |= GPIO_V2_LINE_FLAG_EVENT_CLOCK_HTE;
break;
case '?':
print_usage();
return -1;
}
}
if (debounce_period_us) {
attr = config.num_attrs;
config.num_attrs++;
for (i = 0; i < num_lines; i++)
gpiotools_set_bit(&config.attrs[attr].mask, i);
config.attrs[attr].attr.id = GPIO_V2_LINE_ATTR_ID_DEBOUNCE;
config.attrs[attr].attr.debounce_period_us = debounce_period_us;
}
if (!device_name || num_lines == 0) {
print_usage();
return -1;
}
if (!(config.flags & EDGE_FLAGS)) {
printf("No flags specified, listening on both rising and "
"falling edges\n");
config.flags |= EDGE_FLAGS;
}
return monitor_device(device_name, lines, num_lines, &config, loops);
}
| linux-master | tools/gpio/gpio-event-mon.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* lsgpio - example on how to list the GPIO lines on a system
*
* Copyright (C) 2015 Linus Walleij
*
* Usage:
* lsgpio <-n device-name>
*/
#include <unistd.h>
#include <stdlib.h>
#include <stdbool.h>
#include <stdio.h>
#include <dirent.h>
#include <errno.h>
#include <string.h>
#include <poll.h>
#include <fcntl.h>
#include <getopt.h>
#include <sys/ioctl.h>
#include <linux/gpio.h>
#include "gpio-utils.h"
struct gpio_flag {
char *name;
unsigned long long mask;
};
struct gpio_flag flagnames[] = {
{
.name = "used",
.mask = GPIO_V2_LINE_FLAG_USED,
},
{
.name = "input",
.mask = GPIO_V2_LINE_FLAG_INPUT,
},
{
.name = "output",
.mask = GPIO_V2_LINE_FLAG_OUTPUT,
},
{
.name = "active-low",
.mask = GPIO_V2_LINE_FLAG_ACTIVE_LOW,
},
{
.name = "open-drain",
.mask = GPIO_V2_LINE_FLAG_OPEN_DRAIN,
},
{
.name = "open-source",
.mask = GPIO_V2_LINE_FLAG_OPEN_SOURCE,
},
{
.name = "pull-up",
.mask = GPIO_V2_LINE_FLAG_BIAS_PULL_UP,
},
{
.name = "pull-down",
.mask = GPIO_V2_LINE_FLAG_BIAS_PULL_DOWN,
},
{
.name = "bias-disabled",
.mask = GPIO_V2_LINE_FLAG_BIAS_DISABLED,
},
{
.name = "clock-realtime",
.mask = GPIO_V2_LINE_FLAG_EVENT_CLOCK_REALTIME,
},
};
static void print_attributes(struct gpio_v2_line_info *info)
{
int i;
const char *field_format = "%s";
for (i = 0; i < ARRAY_SIZE(flagnames); i++) {
if (info->flags & flagnames[i].mask) {
fprintf(stdout, field_format, flagnames[i].name);
field_format = ", %s";
}
}
if ((info->flags & GPIO_V2_LINE_FLAG_EDGE_RISING) &&
(info->flags & GPIO_V2_LINE_FLAG_EDGE_FALLING))
fprintf(stdout, field_format, "both-edges");
else if (info->flags & GPIO_V2_LINE_FLAG_EDGE_RISING)
fprintf(stdout, field_format, "rising-edge");
else if (info->flags & GPIO_V2_LINE_FLAG_EDGE_FALLING)
fprintf(stdout, field_format, "falling-edge");
for (i = 0; i < info->num_attrs; i++) {
if (info->attrs[i].id == GPIO_V2_LINE_ATTR_ID_DEBOUNCE)
fprintf(stdout, ", debounce_period=%dusec",
info->attrs[i].debounce_period_us);
}
}
int list_device(const char *device_name)
{
struct gpiochip_info cinfo;
char *chrdev_name;
int fd;
int ret;
int i;
ret = asprintf(&chrdev_name, "/dev/%s", device_name);
if (ret < 0)
return -ENOMEM;
fd = open(chrdev_name, 0);
if (fd == -1) {
ret = -errno;
fprintf(stderr, "Failed to open %s\n", chrdev_name);
goto exit_free_name;
}
/* Inspect this GPIO chip */
ret = ioctl(fd, GPIO_GET_CHIPINFO_IOCTL, &cinfo);
if (ret == -1) {
ret = -errno;
perror("Failed to issue CHIPINFO IOCTL\n");
goto exit_close_error;
}
fprintf(stdout, "GPIO chip: %s, \"%s\", %u GPIO lines\n",
cinfo.name, cinfo.label, cinfo.lines);
/* Loop over the lines and print info */
for (i = 0; i < cinfo.lines; i++) {
struct gpio_v2_line_info linfo;
memset(&linfo, 0, sizeof(linfo));
linfo.offset = i;
ret = ioctl(fd, GPIO_V2_GET_LINEINFO_IOCTL, &linfo);
if (ret == -1) {
ret = -errno;
perror("Failed to issue LINEINFO IOCTL\n");
goto exit_close_error;
}
fprintf(stdout, "\tline %2d:", linfo.offset);
if (linfo.name[0])
fprintf(stdout, " \"%s\"", linfo.name);
else
fprintf(stdout, " unnamed");
if (linfo.consumer[0])
fprintf(stdout, " \"%s\"", linfo.consumer);
else
fprintf(stdout, " unused");
if (linfo.flags) {
fprintf(stdout, " [");
print_attributes(&linfo);
fprintf(stdout, "]");
}
fprintf(stdout, "\n");
}
exit_close_error:
if (close(fd) == -1)
perror("Failed to close GPIO character device file");
exit_free_name:
free(chrdev_name);
return ret;
}
void print_usage(void)
{
fprintf(stderr, "Usage: lsgpio [options]...\n"
"List GPIO chips, lines and states\n"
" -n <name> List GPIOs on a named device\n"
" -? This helptext\n"
);
}
int main(int argc, char **argv)
{
const char *device_name = NULL;
int ret;
int c;
while ((c = getopt(argc, argv, "n:")) != -1) {
switch (c) {
case 'n':
device_name = optarg;
break;
case '?':
print_usage();
return -1;
}
}
if (device_name)
ret = list_device(device_name);
else {
const struct dirent *ent;
DIR *dp;
/* List all GPIO devices one at a time */
dp = opendir("/dev");
if (!dp) {
ret = -errno;
goto error_out;
}
ret = -ENOENT;
while (ent = readdir(dp), ent) {
if (check_prefix(ent->d_name, "gpiochip")) {
ret = list_device(ent->d_name);
if (ret)
break;
}
}
if (ret == -ENOENT)
ret = 0;
if (closedir(dp) == -1) {
perror("scanning devices: Failed to close directory");
ret = -errno;
}
}
error_out:
return ret;
}
| linux-master | tools/gpio/lsgpio.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* gpio-watch - monitor unrequested lines for property changes using the
* character device
*
* Copyright (C) 2019 BayLibre SAS
* Author: Bartosz Golaszewski <[email protected]>
*/
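/*
* Example invocation (sketch; the chip path and line offsets below are
* assumptions):
*
*	gpio-watch /dev/gpiochip0 0 1
*
* watches lines 0 and 1 on gpiochip0 and prints a message whenever one
* of them is requested, released or reconfigured.
*/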
#include <ctype.h>
#include <errno.h>
#include <fcntl.h>
#include <inttypes.h>
#include <linux/gpio.h>
#include <poll.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
int main(int argc, char **argv)
{
struct gpio_v2_line_info_changed chg;
struct gpio_v2_line_info req;
struct pollfd pfd;
int fd, i, j, ret;
char *event, *end;
ssize_t rd;
if (argc < 3)
goto err_usage;
fd = open(argv[1], O_RDWR | O_CLOEXEC);
if (fd < 0) {
perror("unable to open gpiochip");
return EXIT_FAILURE;
}
for (i = 0, j = 2; i < argc - 2; i++, j++) {
memset(&req, 0, sizeof(req));
req.offset = strtoul(argv[j], &end, 0);
if (*end != '\0')
goto err_usage;
ret = ioctl(fd, GPIO_V2_GET_LINEINFO_WATCH_IOCTL, &req);
if (ret) {
perror("unable to set up line watch");
return EXIT_FAILURE;
}
}
pfd.fd = fd;
pfd.events = POLLIN | POLLPRI;
for (;;) {
ret = poll(&pfd, 1, 5000);
if (ret < 0) {
perror("error polling the linechanged fd");
return EXIT_FAILURE;
} else if (ret > 0) {
memset(&chg, 0, sizeof(chg));
rd = read(pfd.fd, &chg, sizeof(chg));
if (rd < 0 || rd != sizeof(chg)) {
if (rd != sizeof(chg))
errno = EIO;
perror("error reading line change event");
return EXIT_FAILURE;
}
switch (chg.event_type) {
case GPIO_V2_LINE_CHANGED_REQUESTED:
event = "requested";
break;
case GPIO_V2_LINE_CHANGED_RELEASED:
event = "released";
break;
case GPIO_V2_LINE_CHANGED_CONFIG:
event = "config changed";
break;
default:
fprintf(stderr,
"invalid event type received from the kernel\n");
return EXIT_FAILURE;
}
printf("line %u: %s at %" PRIu64 "\n",
chg.info.offset, event, (uint64_t)chg.timestamp_ns);
}
}
return 0;
err_usage:
printf("%s: <gpiochip> <line0> <line1> ...\n", argv[0]);
return EXIT_FAILURE;
}
| linux-master | tools/gpio/gpio-watch.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* GPIO tools - helpers library for the GPIO tools
*
* Copyright (C) 2015 Linus Walleij
* Copyright (C) 2016 Bamvor Jian Zhang
*/
#include <unistd.h>
#include <stdlib.h>
#include <stdio.h>
#include <errno.h>
#include <string.h>
#include <fcntl.h>
#include <getopt.h>
#include <sys/ioctl.h>
#include <linux/gpio.h>
#include "gpio-utils.h"
#define CONSUMER "gpio-utils"
/**
* DOC: Operation of gpio
*
* Provide the API of gpiochip for the chardev interface. There are two
* types of API. The first type provides the same functionality as the
* individual ioctls: requesting and releasing GPIO lines, and reading
* and writing their values. Users who need to do many reads and writes
* of GPIO lines should use this type of API.
*
* The second type provides an easy-to-use API. Each of the following
* functions will request the GPIO lines, do the operation and then
* release these lines.
*/
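/*
* Minimal usage sketch of the second (one-shot) type of API. The chip
* name "gpiochip0" and line offset 4 are assumptions for the example:
*
*	int val = gpiotools_get("gpiochip0", 4);
*
*	if (val >= 0)
*		gpiotools_set("gpiochip0", 4, !val);
*/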
/**
* gpiotools_request_line() - request gpio lines in a gpiochip
* @device_name: The name of gpiochip without prefix "/dev/",
* such as "gpiochip0"
* @lines: An array of desired lines, specified by offset
* index for the associated GPIO device.
* @num_lines: The number of lines to request.
* @config: The new config for the requested GPIOs. See
* "linux/gpio.h" for config details.
* @consumer: The name of the consumer, such as "sysfs" or
* "powerkey". This is useful for other users to
* know who is using the lines.
*
* Request gpio lines through the ioctl provided by the chardev. Users
* can call gpiotools_set_values() and gpiotools_get_values() to
* write and read, respectively, through the returned fd. Call
* gpiotools_release_line() to release these lines after that.
*
* Return: On success return the fd;
* On failure return the errno.
*/
int gpiotools_request_line(const char *device_name, unsigned int *lines,
unsigned int num_lines,
struct gpio_v2_line_config *config,
const char *consumer)
{
struct gpio_v2_line_request req;
char *chrdev_name;
int fd;
int i;
int ret;
ret = asprintf(&chrdev_name, "/dev/%s", device_name);
if (ret < 0)
return -ENOMEM;
fd = open(chrdev_name, 0);
if (fd == -1) {
ret = -errno;
fprintf(stderr, "Failed to open %s, %s\n",
chrdev_name, strerror(errno));
goto exit_free_name;
}
memset(&req, 0, sizeof(req));
for (i = 0; i < num_lines; i++)
req.offsets[i] = lines[i];
req.config = *config;
strcpy(req.consumer, consumer);
req.num_lines = num_lines;
ret = ioctl(fd, GPIO_V2_GET_LINE_IOCTL, &req);
if (ret == -1) {
ret = -errno;
fprintf(stderr, "Failed to issue %s (%d), %s\n",
"GPIO_GET_LINE_IOCTL", ret, strerror(errno));
}
if (close(fd) == -1)
perror("Failed to close GPIO character device file");
exit_free_name:
free(chrdev_name);
return ret < 0 ? ret : req.fd;
}
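/*
* Usage sketch for the first (ioctl-mirroring) type of API built around
* gpiotools_request_line() above; the chip name, offset and consumer
* string are assumptions:
*
*	struct gpio_v2_line_config config = {
*		.flags = GPIO_V2_LINE_FLAG_OUTPUT,
*	};
*	struct gpio_v2_line_values values = { .bits = 1, .mask = 1 };
*	unsigned int lines[] = { 0 };
*	int fd;
*
*	fd = gpiotools_request_line("gpiochip0", lines, 1, &config, "demo");
*	if (fd >= 0) {
*		gpiotools_set_values(fd, &values);
*		gpiotools_release_line(fd);
*	}
*/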
/**
* gpiotools_set_values() - Set the value of gpio(s)
* @fd: The fd returned by
* gpiotools_request_line().
* @values: The array of values to set.
*
* Return: On success return 0;
* On failure return the errno.
*/
int gpiotools_set_values(const int fd, struct gpio_v2_line_values *values)
{
int ret;
ret = ioctl(fd, GPIO_V2_LINE_SET_VALUES_IOCTL, values);
if (ret == -1) {
ret = -errno;
fprintf(stderr, "Failed to issue %s (%d), %s\n",
"GPIOHANDLE_SET_LINE_VALUES_IOCTL", ret,
strerror(errno));
}
return ret;
}
/**
* gpiotools_get_values() - Get the value of gpio(s)
* @fd: The fd returned by
* gpiotools_request_line().
* @values: The array of values read from hardware.
*
* Return: On success return 0;
* On failure return the errno.
*/
int gpiotools_get_values(const int fd, struct gpio_v2_line_values *values)
{
int ret;
ret = ioctl(fd, GPIO_V2_LINE_GET_VALUES_IOCTL, values);
if (ret == -1) {
ret = -errno;
fprintf(stderr, "Failed to issue %s (%d), %s\n",
"GPIOHANDLE_GET_LINE_VALUES_IOCTL", ret,
strerror(errno));
}
return ret;
}
/**
* gpiotools_release_line() - Release the line(s) of gpiochip
* @fd: The fd returned by
* gpiotools_request_line().
*
* Return: On success return 0;
* On failure return the errno.
*/
int gpiotools_release_line(const int fd)
{
int ret;
ret = close(fd);
if (ret == -1) {
perror("Failed to close GPIO LINE device file");
ret = -errno;
}
return ret;
}
/**
* gpiotools_get() - Get value from specific line
* @device_name: The name of gpiochip without prefix "/dev/",
* such as "gpiochip0"
* @line: number of line, such as 2.
*
* Return: On success return 0;
* On failure return the errno.
*/
int gpiotools_get(const char *device_name, unsigned int line)
{
int ret;
unsigned int value;
unsigned int lines[] = {line};
ret = gpiotools_gets(device_name, lines, 1, &value);
if (ret)
return ret;
return value;
}
/**
* gpiotools_gets() - Get values from specific lines.
* @device_name: The name of gpiochip without prefix "/dev/",
* such as "gpiochip0".
* @lines: An array of desired lines, specified by offset
* index for the associated GPIO device.
* @num_lines: The number of lines to request.
* @values: The array of values read from the gpiochip.
*
* Return: On success return 0;
* On failure return the errno.
*/
int gpiotools_gets(const char *device_name, unsigned int *lines,
unsigned int num_lines, unsigned int *values)
{
int fd, i;
int ret;
int ret_close;
struct gpio_v2_line_config config;
struct gpio_v2_line_values lv;
memset(&config, 0, sizeof(config));
config.flags = GPIO_V2_LINE_FLAG_INPUT;
ret = gpiotools_request_line(device_name, lines, num_lines,
&config, CONSUMER);
if (ret < 0)
return ret;
fd = ret;
/* lv is on the stack; clear the bitmaps before setting mask bits */
lv.mask = 0;
lv.bits = 0;
for (i = 0; i < num_lines; i++)
gpiotools_set_bit(&lv.mask, i);
ret = gpiotools_get_values(fd, &lv);
if (!ret)
for (i = 0; i < num_lines; i++)
values[i] = gpiotools_test_bit(lv.bits, i);
ret_close = gpiotools_release_line(fd);
return ret < 0 ? ret : ret_close;
}
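/*
* Usage sketch for gpiotools_gets() (chip name and offsets assumed):
*
*	unsigned int lines[] = { 2, 3 };
*	unsigned int values[2];
*
*	if (!gpiotools_gets("gpiochip0", lines, 2, values))
*		printf("line 2 = %u, line 3 = %u\n", values[0], values[1]);
*/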
/**
* gpiotools_set() - Set value to specific line
* @device_name: The name of gpiochip without prefix "/dev/",
* such as "gpiochip0"
* @line: number of line, such as 2.
* @value: The value of gpio, must be 0(low) or 1(high).
*
* Return: On success return 0;
* On failure return the errno.
*/
int gpiotools_set(const char *device_name, unsigned int line,
unsigned int value)
{
unsigned int lines[] = {line};
return gpiotools_sets(device_name, lines, 1, &value);
}
/**
* gpiotools_sets() - Set values to specific lines.
* @device_name: The name of gpiochip without prefix "/dev/",
* such as "gpiochip0".
* @lines: An array of desired lines, specified by offset
* index for the associated GPIO device.
* @num_lines: The number of lines to request.
* @values: The array of values to set on the gpiochip, each
* either 0 (low) or 1 (high).
*
* Return: On success return 0;
* On failure return the errno.
*/
int gpiotools_sets(const char *device_name, unsigned int *lines,
unsigned int num_lines, unsigned int *values)
{
int ret, i;
struct gpio_v2_line_config config;
memset(&config, 0, sizeof(config));
config.flags = GPIO_V2_LINE_FLAG_OUTPUT;
config.num_attrs = 1;
config.attrs[0].attr.id = GPIO_V2_LINE_ATTR_ID_OUTPUT_VALUES;
for (i = 0; i < num_lines; i++) {
gpiotools_set_bit(&config.attrs[0].mask, i);
gpiotools_assign_bit(&config.attrs[0].attr.values,
i, values[i]);
}
ret = gpiotools_request_line(device_name, lines, num_lines,
&config, CONSUMER);
if (ret < 0)
return ret;
return gpiotools_release_line(ret);
}
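/*
* Mirror-image sketch for gpiotools_sets(): drive lines 2 and 3 to
* opposite levels in one request (chip name and offsets assumed):
*
*	unsigned int lines[] = { 2, 3 };
*	unsigned int values[] = { 1, 0 };
*
*	gpiotools_sets("gpiochip0", lines, 2, values);
*/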
| linux-master | tools/gpio/gpio-utils.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* gpio-hammer - example swiss army knife to shake GPIO lines on a system
*
* Copyright (C) 2016 Linus Walleij
*
* Usage:
* gpio-hammer -n <device-name> -o <offset1> -o <offset2>
*/
#include <unistd.h>
#include <stdlib.h>
#include <stdbool.h>
#include <stdio.h>
#include <dirent.h>
#include <errno.h>
#include <string.h>
#include <poll.h>
#include <fcntl.h>
#include <getopt.h>
#include <sys/ioctl.h>
#include <linux/gpio.h>
#include "gpio-utils.h"
int hammer_device(const char *device_name, unsigned int *lines, int num_lines,
unsigned int loops)
{
struct gpio_v2_line_values values;
struct gpio_v2_line_config config;
char swirr[] = "-\\|/";
int fd;
int ret;
int i, j;
unsigned int iteration = 0;
memset(&config, 0, sizeof(config));
config.flags = GPIO_V2_LINE_FLAG_OUTPUT;
ret = gpiotools_request_line(device_name, lines, num_lines,
&config, "gpio-hammer");
if (ret < 0)
goto exit_error;
else
fd = ret;
values.mask = 0;
values.bits = 0;
for (i = 0; i < num_lines; i++)
gpiotools_set_bit(&values.mask, i);
ret = gpiotools_get_values(fd, &values);
if (ret < 0)
goto exit_close_error;
fprintf(stdout, "Hammer lines [");
for (i = 0; i < num_lines; i++) {
fprintf(stdout, "%d", lines[i]);
if (i != (num_lines - 1))
fprintf(stdout, ", ");
}
fprintf(stdout, "] on %s, initial states: [", device_name);
for (i = 0; i < num_lines; i++) {
fprintf(stdout, "%d", gpiotools_test_bit(values.bits, i));
if (i != (num_lines - 1))
fprintf(stdout, ", ");
}
fprintf(stdout, "]\n");
/* Hammertime! */
j = 0;
while (1) {
/* Invert all lines so we blink */
for (i = 0; i < num_lines; i++)
gpiotools_change_bit(&values.bits, i);
ret = gpiotools_set_values(fd, &values);
if (ret < 0)
goto exit_close_error;
/* Re-read values to get status */
ret = gpiotools_get_values(fd, &values);
if (ret < 0)
goto exit_close_error;
fprintf(stdout, "[%c] ", swirr[j]);
j++;
if (j == sizeof(swirr) - 1)
j = 0;
fprintf(stdout, "[");
for (i = 0; i < num_lines; i++) {
fprintf(stdout, "%d: %d", lines[i],
gpiotools_test_bit(values.bits, i));
if (i != (num_lines - 1))
fprintf(stdout, ", ");
}
fprintf(stdout, "]\r");
fflush(stdout);
sleep(1);
iteration++;
if (loops && iteration == loops)
break;
}
fprintf(stdout, "\n");
ret = 0;
exit_close_error:
gpiotools_release_line(fd);
exit_error:
return ret;
}
void print_usage(void)
{
fprintf(stderr, "Usage: gpio-hammer [options]...\n"
"Hammer GPIO lines, 0->1->0->1...\n"
" -n <name> Hammer GPIOs on a named device (must be stated)\n"
" -o <n> Offset[s] to hammer, at least one, several can be stated\n"
" [-c <n>] Do <n> loops (optional, infinite loop if not stated)\n"
" -? This helptext\n"
"\n"
"Example:\n"
"gpio-hammer -n gpiochip0 -o 4\n"
);
}
int main(int argc, char **argv)
{
const char *device_name = NULL;
unsigned int lines[GPIOHANDLES_MAX];
unsigned int loops = 0;
int num_lines;
int c;
int i;
i = 0;
while ((c = getopt(argc, argv, "c:n:o:?")) != -1) {
switch (c) {
case 'c':
loops = strtoul(optarg, NULL, 10);
break;
case 'n':
device_name = optarg;
break;
case 'o':
/*
* Avoid overflow. Do not error immediately; we want to
* accurately report the number of times '-o' was given
* in the error message.
*/
if (i < GPIOHANDLES_MAX)
lines[i] = strtoul(optarg, NULL, 10);
i++;
break;
case '?':
print_usage();
return -1;
}
}
if (i > GPIOHANDLES_MAX) {
fprintf(stderr,
"Only %d occurrences of '-o' are allowed, %d were found\n",
GPIOHANDLES_MAX, i);
return -1;
}
num_lines = i;
if (!device_name || !num_lines) {
print_usage();
return -1;
}
return hammer_device(device_name, lines, num_lines, loops);
}
| linux-master | tools/gpio/gpio-hammer.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright (C) 2020 Matt Helsley <[email protected]>
* Weak definitions necessary to compile objtool without
* some subcommands (e.g. check, orc).
*/
#include <stdbool.h>
#include <errno.h>
#include <objtool/objtool.h>
#define UNSUPPORTED(name) \
({ \
fprintf(stderr, "error: objtool: " name " not implemented\n"); \
return ENOSYS; \
})
int __weak orc_dump(const char *_objname)
{
UNSUPPORTED("ORC");
}
int __weak orc_create(struct objtool_file *file)
{
UNSUPPORTED("ORC");
}
| linux-master | tools/objtool/weak.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright (C) 2015 Josh Poimboeuf <[email protected]>
*/
#include <stdio.h>
#include <stdbool.h>
#include <string.h>
#include <stdlib.h>
#include <unistd.h>
#include <fcntl.h>
#include <subcmd/exec-cmd.h>
#include <subcmd/pager.h>
#include <linux/kernel.h>
#include <objtool/builtin.h>
#include <objtool/objtool.h>
#include <objtool/warn.h>
bool help;
const char *objname;
static struct objtool_file file;
static bool objtool_create_backup(const char *_objname)
{
int len = strlen(_objname);
char *buf, *base, *name = malloc(len+6);
int s, d, l, t;
if (!name) {
perror("failed backup name malloc");
return false;
}
strcpy(name, _objname);
strcpy(name + len, ".orig");
d = open(name, O_CREAT|O_WRONLY|O_TRUNC, 0644);
if (d < 0) {
perror("failed to create backup file");
return false;
}
s = open(_objname, O_RDONLY);
if (s < 0) {
perror("failed to open orig file");
return false;
}
buf = malloc(4096);
if (!buf) {
perror("failed backup data malloc");
return false;
}
while ((l = read(s, buf, 4096)) > 0) {
base = buf;
do {
t = write(d, base, l);
if (t < 0) {
perror("failed backup write");
return false;
}
base += t;
l -= t;
} while (l);
}
if (l < 0) {
perror("failed backup read");
return false;
}
free(name);
free(buf);
close(d);
close(s);
return true;
}
struct objtool_file *objtool_open_read(const char *_objname)
{
if (objname) {
if (strcmp(objname, _objname)) {
WARN("won't handle more than one file at a time");
return NULL;
}
return &file;
}
objname = _objname;
file.elf = elf_open_read(objname, O_RDWR);
if (!file.elf)
return NULL;
if (opts.backup && !objtool_create_backup(objname)) {
WARN("can't create backup file");
return NULL;
}
hash_init(file.insn_hash);
INIT_LIST_HEAD(&file.retpoline_call_list);
INIT_LIST_HEAD(&file.return_thunk_list);
INIT_LIST_HEAD(&file.static_call_list);
INIT_LIST_HEAD(&file.mcount_loc_list);
INIT_LIST_HEAD(&file.endbr_list);
INIT_LIST_HEAD(&file.call_list);
file.ignore_unreachables = opts.no_unreachable;
file.hints = false;
return &file;
}
void objtool_pv_add(struct objtool_file *f, int idx, struct symbol *func)
{
if (!opts.noinstr)
return;
if (!f->pv_ops) {
WARN("paravirt confusion");
return;
}
/*
* These functions will be patched into native code,
* see paravirt_patch().
*/
if (!strcmp(func->name, "_paravirt_nop") ||
!strcmp(func->name, "_paravirt_ident_64"))
return;
/* already added this function */
if (!list_empty(&func->pv_target))
return;
list_add(&func->pv_target, &f->pv_ops[idx].targets);
f->pv_ops[idx].clean = false;
}
int main(int argc, const char **argv)
{
static const char *UNUSED = "OBJTOOL_NOT_IMPLEMENTED";
/* libsubcmd init */
exec_cmd_init("objtool", UNUSED, UNUSED, UNUSED);
pager_init(UNUSED);
objtool_run(argc, argv);
return 0;
}
| linux-master | tools/objtool/objtool.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright (C) 2015-2017 Josh Poimboeuf <[email protected]>
*/
#include <string.h>
#include <stdlib.h>
#include <inttypes.h>
#include <sys/mman.h>
#include <objtool/builtin.h>
#include <objtool/cfi.h>
#include <objtool/arch.h>
#include <objtool/check.h>
#include <objtool/special.h>
#include <objtool/warn.h>
#include <objtool/endianness.h>
#include <linux/objtool_types.h>
#include <linux/hashtable.h>
#include <linux/kernel.h>
#include <linux/static_call_types.h>
struct alternative {
struct alternative *next;
struct instruction *insn;
bool skip_orig;
};
static unsigned long nr_cfi, nr_cfi_reused, nr_cfi_cache;
static struct cfi_init_state initial_func_cfi;
static struct cfi_state init_cfi;
static struct cfi_state func_cfi;
static struct cfi_state force_undefined_cfi;
struct instruction *find_insn(struct objtool_file *file,
struct section *sec, unsigned long offset)
{
struct instruction *insn;
hash_for_each_possible(file->insn_hash, insn, hash, sec_offset_hash(sec, offset)) {
if (insn->sec == sec && insn->offset == offset)
return insn;
}
return NULL;
}
struct instruction *next_insn_same_sec(struct objtool_file *file,
struct instruction *insn)
{
if (insn->idx == INSN_CHUNK_MAX)
return find_insn(file, insn->sec, insn->offset + insn->len);
insn++;
if (!insn->len)
return NULL;
return insn;
}
static struct instruction *next_insn_same_func(struct objtool_file *file,
struct instruction *insn)
{
struct instruction *next = next_insn_same_sec(file, insn);
struct symbol *func = insn_func(insn);
if (!func)
return NULL;
if (next && insn_func(next) == func)
return next;
/* Check if we're already in the subfunction: */
if (func == func->cfunc)
return NULL;
/* Move to the subfunction: */
return find_insn(file, func->cfunc->sec, func->cfunc->offset);
}
static struct instruction *prev_insn_same_sec(struct objtool_file *file,
struct instruction *insn)
{
if (insn->idx == 0) {
if (insn->prev_len)
return find_insn(file, insn->sec, insn->offset - insn->prev_len);
return NULL;
}
return insn - 1;
}
static struct instruction *prev_insn_same_sym(struct objtool_file *file,
struct instruction *insn)
{
struct instruction *prev = prev_insn_same_sec(file, insn);
if (prev && insn_func(prev) == insn_func(insn))
return prev;
return NULL;
}
#define for_each_insn(file, insn) \
for (struct section *__sec, *__fake = (struct section *)1; \
__fake; __fake = NULL) \
for_each_sec(file, __sec) \
sec_for_each_insn(file, __sec, insn)
#define func_for_each_insn(file, func, insn) \
for (insn = find_insn(file, func->sec, func->offset); \
insn; \
insn = next_insn_same_func(file, insn))
#define sym_for_each_insn(file, sym, insn) \
for (insn = find_insn(file, sym->sec, sym->offset); \
insn && insn->offset < sym->offset + sym->len; \
insn = next_insn_same_sec(file, insn))
#define sym_for_each_insn_continue_reverse(file, sym, insn) \
for (insn = prev_insn_same_sec(file, insn); \
insn && insn->offset >= sym->offset; \
insn = prev_insn_same_sec(file, insn))
#define sec_for_each_insn_from(file, insn) \
for (; insn; insn = next_insn_same_sec(file, insn))
#define sec_for_each_insn_continue(file, insn) \
for (insn = next_insn_same_sec(file, insn); insn; \
insn = next_insn_same_sec(file, insn))
static inline struct symbol *insn_call_dest(struct instruction *insn)
{
if (insn->type == INSN_JUMP_DYNAMIC ||
insn->type == INSN_CALL_DYNAMIC)
return NULL;
return insn->_call_dest;
}
static inline struct reloc *insn_jump_table(struct instruction *insn)
{
if (insn->type == INSN_JUMP_DYNAMIC ||
insn->type == INSN_CALL_DYNAMIC)
return insn->_jump_table;
return NULL;
}
static bool is_jump_table_jump(struct instruction *insn)
{
struct alt_group *alt_group = insn->alt_group;
if (insn_jump_table(insn))
return true;
/* Retpoline alternative for a jump table? */
return alt_group && alt_group->orig_group &&
insn_jump_table(alt_group->orig_group->first_insn);
}
static bool is_sibling_call(struct instruction *insn)
{
/*
* Assume only STT_FUNC calls have jump-tables.
*/
if (insn_func(insn)) {
/* An indirect jump is either a sibling call or a jump to a table. */
if (insn->type == INSN_JUMP_DYNAMIC)
return !is_jump_table_jump(insn);
}
/* add_jump_destinations() sets insn_call_dest(insn) for sibling calls. */
return (is_static_jump(insn) && insn_call_dest(insn));
}
/*
* This checks to see if the given function is a "noreturn" function.
*
* For global functions which are outside the scope of this object file, we
* have to keep a manual list of them.
*
* For local functions, we have to detect them manually by simply looking for
* the lack of a return instruction.
*/
static bool __dead_end_function(struct objtool_file *file, struct symbol *func,
int recursion)
{
int i;
struct instruction *insn;
bool empty = true;
#define NORETURN(func) __stringify(func),
static const char * const global_noreturns[] = {
#include "noreturns.h"
};
#undef NORETURN
if (!func)
return false;
if (func->bind == STB_GLOBAL || func->bind == STB_WEAK)
for (i = 0; i < ARRAY_SIZE(global_noreturns); i++)
if (!strcmp(func->name, global_noreturns[i]))
return true;
if (func->bind == STB_WEAK)
return false;
if (!func->len)
return false;
insn = find_insn(file, func->sec, func->offset);
if (!insn || !insn_func(insn))
return false;
func_for_each_insn(file, func, insn) {
empty = false;
if (insn->type == INSN_RETURN)
return false;
}
if (empty)
return false;
/*
* A function can have a sibling call instead of a return. In that
* case, the function's dead-end status depends on whether the target
* of the sibling call returns.
*/
func_for_each_insn(file, func, insn) {
if (is_sibling_call(insn)) {
struct instruction *dest = insn->jump_dest;
if (!dest)
/* sibling call to another file */
return false;
/* local sibling call */
if (recursion == 5) {
/*
* Infinite recursion: two functions have
* sibling calls to each other. This is a very
* rare case. It means they aren't dead ends.
*/
return false;
}
return __dead_end_function(file, insn_func(dest), recursion+1);
}
}
return true;
}
static bool dead_end_function(struct objtool_file *file, struct symbol *func)
{
return __dead_end_function(file, func, 0);
}
static void init_cfi_state(struct cfi_state *cfi)
{
int i;
for (i = 0; i < CFI_NUM_REGS; i++) {
cfi->regs[i].base = CFI_UNDEFINED;
cfi->vals[i].base = CFI_UNDEFINED;
}
cfi->cfa.base = CFI_UNDEFINED;
cfi->drap_reg = CFI_UNDEFINED;
cfi->drap_offset = -1;
}
static void init_insn_state(struct objtool_file *file, struct insn_state *state,
struct section *sec)
{
memset(state, 0, sizeof(*state));
init_cfi_state(&state->cfi);
/*
* We need the full vmlinux for noinstr validation, otherwise we can
* not correctly determine insn_call_dest(insn)->sec (external symbols
* do not have a section).
*/
if (opts.link && opts.noinstr && sec)
state->noinstr = sec->noinstr;
}
static struct cfi_state *cfi_alloc(void)
{
struct cfi_state *cfi = calloc(1, sizeof(struct cfi_state));
if (!cfi) {
WARN("calloc failed");
exit(1);
}
nr_cfi++;
return cfi;
}
static int cfi_bits;
static struct hlist_head *cfi_hash;
static inline bool cficmp(struct cfi_state *cfi1, struct cfi_state *cfi2)
{
return memcmp((void *)cfi1 + sizeof(cfi1->hash),
(void *)cfi2 + sizeof(cfi2->hash),
sizeof(struct cfi_state) - sizeof(struct hlist_node));
}
static inline u32 cfi_key(struct cfi_state *cfi)
{
return jhash((void *)cfi + sizeof(cfi->hash),
sizeof(*cfi) - sizeof(cfi->hash), 0);
}
static struct cfi_state *cfi_hash_find_or_add(struct cfi_state *cfi)
{
struct hlist_head *head = &cfi_hash[hash_min(cfi_key(cfi), cfi_bits)];
struct cfi_state *obj;
hlist_for_each_entry(obj, head, hash) {
if (!cficmp(cfi, obj)) {
nr_cfi_cache++;
return obj;
}
}
obj = cfi_alloc();
*obj = *cfi;
hlist_add_head(&obj->hash, head);
return obj;
}
static void cfi_hash_add(struct cfi_state *cfi)
{
struct hlist_head *head = &cfi_hash[hash_min(cfi_key(cfi), cfi_bits)];
hlist_add_head(&cfi->hash, head);
}
static void *cfi_hash_alloc(unsigned long size)
{
cfi_bits = max(10, ilog2(size));
cfi_hash = mmap(NULL, sizeof(struct hlist_head) << cfi_bits,
PROT_READ|PROT_WRITE,
MAP_PRIVATE|MAP_ANON, -1, 0);
if (cfi_hash == (void *)-1L) {
WARN("mmap fail cfi_hash");
cfi_hash = NULL;
} else if (opts.stats) {
printf("cfi_bits: %d\n", cfi_bits);
}
return cfi_hash;
}
static unsigned long nr_insns;
static unsigned long nr_insns_visited;
/*
* Call the arch-specific instruction decoder for all the instructions and add
* them to the global instruction list.
*/
static int decode_instructions(struct objtool_file *file)
{
struct section *sec;
struct symbol *func;
unsigned long offset;
struct instruction *insn;
int ret;
for_each_sec(file, sec) {
struct instruction *insns = NULL;
u8 prev_len = 0;
u8 idx = 0;
if (!(sec->sh.sh_flags & SHF_EXECINSTR))
continue;
if (strcmp(sec->name, ".altinstr_replacement") &&
strcmp(sec->name, ".altinstr_aux") &&
strncmp(sec->name, ".discard.", 9))
sec->text = true;
if (!strcmp(sec->name, ".noinstr.text") ||
!strcmp(sec->name, ".entry.text") ||
!strcmp(sec->name, ".cpuidle.text") ||
!strncmp(sec->name, ".text..__x86.", 13))
sec->noinstr = true;
/*
* .init.text code is run before userspace and thus doesn't
* strictly need retpolines, except for modules, which are
* loaded late; they very much do need retpolines in their
* .init.text
*/
if (!strcmp(sec->name, ".init.text") && !opts.module)
sec->init = true;
for (offset = 0; offset < sec->sh.sh_size; offset += insn->len) {
if (!insns || idx == INSN_CHUNK_MAX) {
insns = calloc(INSN_CHUNK_SIZE, sizeof(*insn));
if (!insns) {
WARN("calloc failed");
return -1;
}
idx = 0;
} else {
idx++;
}
insn = &insns[idx];
insn->idx = idx;
INIT_LIST_HEAD(&insn->call_node);
insn->sec = sec;
insn->offset = offset;
insn->prev_len = prev_len;
ret = arch_decode_instruction(file, sec, offset,
sec->sh.sh_size - offset,
insn);
if (ret)
return ret;
prev_len = insn->len;
/*
* By default, "ud2" is a dead end unless otherwise
* annotated, because GCC 7 inserts it for certain
* divide-by-zero cases.
*/
if (insn->type == INSN_BUG)
insn->dead_end = true;
hash_add(file->insn_hash, &insn->hash, sec_offset_hash(sec, insn->offset));
nr_insns++;
}
// printf("%s: last chunk used: %d\n", sec->name, (int)idx);
sec_for_each_sym(sec, func) {
if (func->type != STT_NOTYPE && func->type != STT_FUNC)
continue;
if (func->offset == sec->sh.sh_size) {
/* Heuristic: likely an "end" symbol */
if (func->type == STT_NOTYPE)
continue;
WARN("%s(): STT_FUNC at end of section",
func->name);
return -1;
}
if (func->embedded_insn || func->alias != func)
continue;
if (!find_insn(file, sec, func->offset)) {
WARN("%s(): can't find starting instruction",
func->name);
return -1;
}
sym_for_each_insn(file, func, insn) {
insn->sym = func;
if (func->type == STT_FUNC &&
insn->type == INSN_ENDBR &&
list_empty(&insn->call_node)) {
if (insn->offset == func->offset) {
list_add_tail(&insn->call_node, &file->endbr_list);
file->nr_endbr++;
} else {
file->nr_endbr_int++;
}
}
}
}
}
if (opts.stats)
printf("nr_insns: %lu\n", nr_insns);
return 0;
}
/*
* Read the pv_ops[] .data table to find the static initialized values.
*/
static int add_pv_ops(struct objtool_file *file, const char *symname)
{
struct symbol *sym, *func;
unsigned long off, end;
struct reloc *reloc;
int idx;
sym = find_symbol_by_name(file->elf, symname);
if (!sym)
return 0;
off = sym->offset;
end = off + sym->len;
for (;;) {
reloc = find_reloc_by_dest_range(file->elf, sym->sec, off, end - off);
if (!reloc)
break;
func = reloc->sym;
if (func->type == STT_SECTION)
func = find_symbol_by_offset(reloc->sym->sec,
reloc_addend(reloc));
idx = (reloc_offset(reloc) - sym->offset) / sizeof(unsigned long);
objtool_pv_add(file, idx, func);
off = reloc_offset(reloc) + 1;
if (off > end)
break;
}
return 0;
}
/*
* Allocate and initialize file->pv_ops[].
*/
static int init_pv_ops(struct objtool_file *file)
{
static const char *pv_ops_tables[] = {
"pv_ops",
"xen_cpu_ops",
"xen_irq_ops",
"xen_mmu_ops",
NULL,
};
const char *pv_ops;
struct symbol *sym;
int idx, nr;
if (!opts.noinstr)
return 0;
file->pv_ops = NULL;
sym = find_symbol_by_name(file->elf, "pv_ops");
if (!sym)
return 0;
nr = sym->len / sizeof(unsigned long);
file->pv_ops = calloc(nr, sizeof(struct pv_state));
if (!file->pv_ops)
return -1;
for (idx = 0; idx < nr; idx++)
INIT_LIST_HEAD(&file->pv_ops[idx].targets);
for (idx = 0; (pv_ops = pv_ops_tables[idx]); idx++)
add_pv_ops(file, pv_ops);
return 0;
}
static struct instruction *find_last_insn(struct objtool_file *file,
struct section *sec)
{
struct instruction *insn = NULL;
unsigned int offset;
unsigned int end = (sec->sh.sh_size > 10) ? sec->sh.sh_size - 10 : 0;
for (offset = sec->sh.sh_size - 1; offset >= end && !insn; offset--)
insn = find_insn(file, sec, offset);
return insn;
}
/*
* Mark "ud2" instructions and manually annotated dead ends.
*/
static int add_dead_ends(struct objtool_file *file)
{
struct section *rsec;
struct reloc *reloc;
struct instruction *insn;
s64 addend;
/*
* Check for manually annotated dead ends.
*/
rsec = find_section_by_name(file->elf, ".rela.discard.unreachable");
if (!rsec)
goto reachable;
for_each_reloc(rsec, reloc) {
if (reloc->sym->type != STT_SECTION) {
WARN("unexpected relocation symbol type in %s", rsec->name);
return -1;
}
addend = reloc_addend(reloc);
insn = find_insn(file, reloc->sym->sec, addend);
if (insn)
insn = prev_insn_same_sec(file, insn);
else if (addend == reloc->sym->sec->sh.sh_size) {
insn = find_last_insn(file, reloc->sym->sec);
if (!insn) {
WARN("can't find unreachable insn at %s+0x%" PRIx64,
reloc->sym->sec->name, addend);
return -1;
}
} else {
WARN("can't find unreachable insn at %s+0x%" PRIx64,
reloc->sym->sec->name, addend);
return -1;
}
insn->dead_end = true;
}
reachable:
/*
* These manually annotated reachable checks are needed for GCC 4.4,
* where the Linux unreachable() macro isn't supported. In that case
* GCC doesn't know the "ud2" is fatal, so it generates code as if it's
* not a dead end.
*/
rsec = find_section_by_name(file->elf, ".rela.discard.reachable");
if (!rsec)
return 0;
for_each_reloc(rsec, reloc) {
if (reloc->sym->type != STT_SECTION) {
WARN("unexpected relocation symbol type in %s", rsec->name);
return -1;
}
addend = reloc_addend(reloc);
insn = find_insn(file, reloc->sym->sec, addend);
if (insn)
insn = prev_insn_same_sec(file, insn);
else if (addend == reloc->sym->sec->sh.sh_size) {
insn = find_last_insn(file, reloc->sym->sec);
if (!insn) {
WARN("can't find reachable insn at %s+0x%" PRIx64,
reloc->sym->sec->name, addend);
return -1;
}
} else {
WARN("can't find reachable insn at %s+0x%" PRIx64,
reloc->sym->sec->name, addend);
return -1;
}
insn->dead_end = false;
}
return 0;
}
static int create_static_call_sections(struct objtool_file *file)
{
struct static_call_site *site;
struct section *sec;
struct instruction *insn;
struct symbol *key_sym;
char *key_name, *tmp;
int idx;
sec = find_section_by_name(file->elf, ".static_call_sites");
if (sec) {
INIT_LIST_HEAD(&file->static_call_list);
WARN("file already has .static_call_sites section, skipping");
return 0;
}
if (list_empty(&file->static_call_list))
return 0;
idx = 0;
list_for_each_entry(insn, &file->static_call_list, call_node)
idx++;
sec = elf_create_section_pair(file->elf, ".static_call_sites",
sizeof(*site), idx, idx * 2);
if (!sec)
return -1;
/* Allow modules to modify the low bits of static_call_site::key */
sec->sh.sh_flags |= SHF_WRITE;
idx = 0;
list_for_each_entry(insn, &file->static_call_list, call_node) {
/* populate reloc for 'addr' */
if (!elf_init_reloc_text_sym(file->elf, sec,
idx * sizeof(*site), idx * 2,
insn->sec, insn->offset))
return -1;
/* find key symbol */
key_name = strdup(insn_call_dest(insn)->name);
if (!key_name) {
perror("strdup");
return -1;
}
if (strncmp(key_name, STATIC_CALL_TRAMP_PREFIX_STR,
STATIC_CALL_TRAMP_PREFIX_LEN)) {
WARN("static_call: trampoline name malformed: %s", key_name);
free(key_name);
return -1;
}
tmp = key_name + STATIC_CALL_TRAMP_PREFIX_LEN - STATIC_CALL_KEY_PREFIX_LEN;
memcpy(tmp, STATIC_CALL_KEY_PREFIX_STR, STATIC_CALL_KEY_PREFIX_LEN);
key_sym = find_symbol_by_name(file->elf, tmp);
if (!key_sym) {
if (!opts.module) {
WARN("static_call: can't find static_call_key symbol: %s", tmp);
free(key_name);
return -1;
}
/*
* For modules, the key might not be exported, which
* means the module can make static calls but isn't
* allowed to change them.
*
* In that case we temporarily set the key to be the
* trampoline address. This is fixed up in
* static_call_add_module().
*/
key_sym = insn_call_dest(insn);
}
free(key_name);
/* populate reloc for 'key' */
if (!elf_init_reloc_data_sym(file->elf, sec,
idx * sizeof(*site) + 4,
(idx * 2) + 1, key_sym,
is_sibling_call(insn) * STATIC_CALL_SITE_TAIL))
return -1;
idx++;
}
return 0;
}
static int create_retpoline_sites_sections(struct objtool_file *file)
{
struct instruction *insn;
struct section *sec;
int idx;
sec = find_section_by_name(file->elf, ".retpoline_sites");
if (sec) {
WARN("file already has .retpoline_sites, skipping");
return 0;
}
idx = 0;
list_for_each_entry(insn, &file->retpoline_call_list, call_node)
idx++;
if (!idx)
return 0;
sec = elf_create_section_pair(file->elf, ".retpoline_sites",
sizeof(int), idx, idx);
if (!sec)
return -1;
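	/*
	 * Each entry is a single 32-bit slot whose reloc points at the
	 * retpoline call/jmp site; the kernel's alternative patching can then
	 * rewrite those sites when retpolines aren't needed (illustrative
	 * summary).
	 */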
idx = 0;
list_for_each_entry(insn, &file->retpoline_call_list, call_node) {
if (!elf_init_reloc_text_sym(file->elf, sec,
idx * sizeof(int), idx,
insn->sec, insn->offset))
return -1;
idx++;
}
return 0;
}
static int create_return_sites_sections(struct objtool_file *file)
{
struct instruction *insn;
struct section *sec;
int idx;
sec = find_section_by_name(file->elf, ".return_sites");
if (sec) {
WARN("file already has .return_sites, skipping");
return 0;
}
idx = 0;
list_for_each_entry(insn, &file->return_thunk_list, call_node)
idx++;
if (!idx)
return 0;
sec = elf_create_section_pair(file->elf, ".return_sites",
sizeof(int), idx, idx);
if (!sec)
return -1;
idx = 0;
list_for_each_entry(insn, &file->return_thunk_list, call_node) {
if (!elf_init_reloc_text_sym(file->elf, sec,
idx * sizeof(int), idx,
insn->sec, insn->offset))
return -1;
idx++;
}
return 0;
}
static int create_ibt_endbr_seal_sections(struct objtool_file *file)
{
struct instruction *insn;
struct section *sec;
int idx;
sec = find_section_by_name(file->elf, ".ibt_endbr_seal");
if (sec) {
WARN("file already has .ibt_endbr_seal, skipping");
return 0;
}
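	/*
	 * Each recorded ENDBR is one the kernel may "seal" (poison) at boot,
	 * since objtool determined it can never be reached via an indirect
	 * branch (illustrative summary).
	 */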
idx = 0;
list_for_each_entry(insn, &file->endbr_list, call_node)
idx++;
if (opts.stats) {
printf("ibt: ENDBR at function start: %d\n", file->nr_endbr);
printf("ibt: ENDBR inside functions: %d\n", file->nr_endbr_int);
printf("ibt: superfluous ENDBR: %d\n", idx);
}
if (!idx)
return 0;
sec = elf_create_section_pair(file->elf, ".ibt_endbr_seal",
sizeof(int), idx, idx);
if (!sec)
return -1;
idx = 0;
list_for_each_entry(insn, &file->endbr_list, call_node) {
int *site = (int *)sec->data->d_buf + idx;
struct symbol *sym = insn->sym;
*site = 0;
if (opts.module && sym && sym->type == STT_FUNC &&
insn->offset == sym->offset &&
(!strcmp(sym->name, "init_module") ||
!strcmp(sym->name, "cleanup_module")))
WARN("%s(): not an indirect call target", sym->name);
if (!elf_init_reloc_text_sym(file->elf, sec,
idx * sizeof(int), idx,
insn->sec, insn->offset))
return -1;
idx++;
}
return 0;
}
static int create_cfi_sections(struct objtool_file *file)
{
struct section *sec;
struct symbol *sym;
int idx;
sec = find_section_by_name(file->elf, ".cfi_sites");
if (sec) {
INIT_LIST_HEAD(&file->call_list);
WARN("file already has .cfi_sites section, skipping");
return 0;
}
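	/*
	 * With kCFI, functions get a "__cfi_<func>" preamble symbol carrying
	 * the expected type hash; record the location of every such preamble
	 * (illustrative summary).
	 */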
idx = 0;
for_each_sym(file, sym) {
if (sym->type != STT_FUNC)
continue;
if (strncmp(sym->name, "__cfi_", 6))
continue;
idx++;
}
sec = elf_create_section_pair(file->elf, ".cfi_sites",
sizeof(unsigned int), idx, idx);
if (!sec)
return -1;
idx = 0;
for_each_sym(file, sym) {
if (sym->type != STT_FUNC)
continue;
if (strncmp(sym->name, "__cfi_", 6))
continue;
if (!elf_init_reloc_text_sym(file->elf, sec,
idx * sizeof(unsigned int), idx,
sym->sec, sym->offset))
return -1;
idx++;
}
return 0;
}
static int create_mcount_loc_sections(struct objtool_file *file)
{
size_t addr_size = elf_addr_size(file->elf);
struct instruction *insn;
struct section *sec;
int idx;
sec = find_section_by_name(file->elf, "__mcount_loc");
if (sec) {
INIT_LIST_HEAD(&file->mcount_loc_list);
WARN("file already has __mcount_loc section, skipping");
return 0;
}
if (list_empty(&file->mcount_loc_list))
return 0;
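	/*
	 * Each __mcount_loc entry is the address of one __fentry__ call site;
	 * ftrace walks this table at boot to turn the calls into NOPs until
	 * tracing is enabled (illustrative summary).
	 */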
idx = 0;
list_for_each_entry(insn, &file->mcount_loc_list, call_node)
idx++;
sec = elf_create_section_pair(file->elf, "__mcount_loc", addr_size,
idx, idx);
if (!sec)
return -1;
sec->sh.sh_addralign = addr_size;
idx = 0;
list_for_each_entry(insn, &file->mcount_loc_list, call_node) {
struct reloc *reloc;
reloc = elf_init_reloc_text_sym(file->elf, sec, idx * addr_size, idx,
insn->sec, insn->offset);
if (!reloc)
return -1;
set_reloc_type(file->elf, reloc, addr_size == 8 ? R_ABS64 : R_ABS32);
idx++;
}
return 0;
}
static int create_direct_call_sections(struct objtool_file *file)
{
struct instruction *insn;
struct section *sec;
int idx;
sec = find_section_by_name(file->elf, ".call_sites");
if (sec) {
INIT_LIST_HEAD(&file->call_list);
WARN("file already has .call_sites section, skipping");
return 0;
}
if (list_empty(&file->call_list))
return 0;
idx = 0;
list_for_each_entry(insn, &file->call_list, call_node)
idx++;
sec = elf_create_section_pair(file->elf, ".call_sites",
sizeof(unsigned int), idx, idx);
if (!sec)
return -1;
idx = 0;
list_for_each_entry(insn, &file->call_list, call_node) {
if (!elf_init_reloc_text_sym(file->elf, sec,
idx * sizeof(unsigned int), idx,
insn->sec, insn->offset))
return -1;
idx++;
}
return 0;
}
/*
* Warnings shouldn't be reported for ignored functions.
*/
static void add_ignores(struct objtool_file *file)
{
struct instruction *insn;
struct section *rsec;
struct symbol *func;
struct reloc *reloc;
rsec = find_section_by_name(file->elf, ".rela.discard.func_stack_frame_non_standard");
if (!rsec)
return;
for_each_reloc(rsec, reloc) {
switch (reloc->sym->type) {
case STT_FUNC:
func = reloc->sym;
break;
case STT_SECTION:
func = find_func_by_offset(reloc->sym->sec, reloc_addend(reloc));
if (!func)
continue;
break;
default:
WARN("unexpected relocation symbol type in %s: %d",
rsec->name, reloc->sym->type);
continue;
}
func_for_each_insn(file, func, insn)
insn->ignore = true;
}
}
/*
 * This is a whitelist of functions that are allowed to be called with AC set.
 * The list is meant to be minimal and only contains compiler instrumentation
 * ABI and a few helpers used to implement the *_{to,from}_user() functions.
*
* These functions must not directly change AC, but may PUSHF/POPF.
*/
static const char *uaccess_safe_builtin[] = {
/* KASAN */
"kasan_report",
"kasan_check_range",
/* KASAN out-of-line */
"__asan_loadN_noabort",
"__asan_load1_noabort",
"__asan_load2_noabort",
"__asan_load4_noabort",
"__asan_load8_noabort",
"__asan_load16_noabort",
"__asan_storeN_noabort",
"__asan_store1_noabort",
"__asan_store2_noabort",
"__asan_store4_noabort",
"__asan_store8_noabort",
"__asan_store16_noabort",
"__kasan_check_read",
"__kasan_check_write",
/* KASAN in-line */
"__asan_report_load_n_noabort",
"__asan_report_load1_noabort",
"__asan_report_load2_noabort",
"__asan_report_load4_noabort",
"__asan_report_load8_noabort",
"__asan_report_load16_noabort",
"__asan_report_store_n_noabort",
"__asan_report_store1_noabort",
"__asan_report_store2_noabort",
"__asan_report_store4_noabort",
"__asan_report_store8_noabort",
"__asan_report_store16_noabort",
/* KCSAN */
"__kcsan_check_access",
"__kcsan_mb",
"__kcsan_wmb",
"__kcsan_rmb",
"__kcsan_release",
"kcsan_found_watchpoint",
"kcsan_setup_watchpoint",
"kcsan_check_scoped_accesses",
"kcsan_disable_current",
"kcsan_enable_current_nowarn",
/* KCSAN/TSAN */
"__tsan_func_entry",
"__tsan_func_exit",
"__tsan_read_range",
"__tsan_write_range",
"__tsan_read1",
"__tsan_read2",
"__tsan_read4",
"__tsan_read8",
"__tsan_read16",
"__tsan_write1",
"__tsan_write2",
"__tsan_write4",
"__tsan_write8",
"__tsan_write16",
"__tsan_read_write1",
"__tsan_read_write2",
"__tsan_read_write4",
"__tsan_read_write8",
"__tsan_read_write16",
"__tsan_volatile_read1",
"__tsan_volatile_read2",
"__tsan_volatile_read4",
"__tsan_volatile_read8",
"__tsan_volatile_read16",
"__tsan_volatile_write1",
"__tsan_volatile_write2",
"__tsan_volatile_write4",
"__tsan_volatile_write8",
"__tsan_volatile_write16",
"__tsan_atomic8_load",
"__tsan_atomic16_load",
"__tsan_atomic32_load",
"__tsan_atomic64_load",
"__tsan_atomic8_store",
"__tsan_atomic16_store",
"__tsan_atomic32_store",
"__tsan_atomic64_store",
"__tsan_atomic8_exchange",
"__tsan_atomic16_exchange",
"__tsan_atomic32_exchange",
"__tsan_atomic64_exchange",
"__tsan_atomic8_fetch_add",
"__tsan_atomic16_fetch_add",
"__tsan_atomic32_fetch_add",
"__tsan_atomic64_fetch_add",
"__tsan_atomic8_fetch_sub",
"__tsan_atomic16_fetch_sub",
"__tsan_atomic32_fetch_sub",
"__tsan_atomic64_fetch_sub",
"__tsan_atomic8_fetch_and",
"__tsan_atomic16_fetch_and",
"__tsan_atomic32_fetch_and",
"__tsan_atomic64_fetch_and",
"__tsan_atomic8_fetch_or",
"__tsan_atomic16_fetch_or",
"__tsan_atomic32_fetch_or",
"__tsan_atomic64_fetch_or",
"__tsan_atomic8_fetch_xor",
"__tsan_atomic16_fetch_xor",
"__tsan_atomic32_fetch_xor",
"__tsan_atomic64_fetch_xor",
"__tsan_atomic8_fetch_nand",
"__tsan_atomic16_fetch_nand",
"__tsan_atomic32_fetch_nand",
"__tsan_atomic64_fetch_nand",
"__tsan_atomic8_compare_exchange_strong",
"__tsan_atomic16_compare_exchange_strong",
"__tsan_atomic32_compare_exchange_strong",
"__tsan_atomic64_compare_exchange_strong",
"__tsan_atomic8_compare_exchange_weak",
"__tsan_atomic16_compare_exchange_weak",
"__tsan_atomic32_compare_exchange_weak",
"__tsan_atomic64_compare_exchange_weak",
"__tsan_atomic8_compare_exchange_val",
"__tsan_atomic16_compare_exchange_val",
"__tsan_atomic32_compare_exchange_val",
"__tsan_atomic64_compare_exchange_val",
"__tsan_atomic_thread_fence",
"__tsan_atomic_signal_fence",
"__tsan_unaligned_read16",
"__tsan_unaligned_write16",
/* KCOV */
"write_comp_data",
"check_kcov_mode",
"__sanitizer_cov_trace_pc",
"__sanitizer_cov_trace_const_cmp1",
"__sanitizer_cov_trace_const_cmp2",
"__sanitizer_cov_trace_const_cmp4",
"__sanitizer_cov_trace_const_cmp8",
"__sanitizer_cov_trace_cmp1",
"__sanitizer_cov_trace_cmp2",
"__sanitizer_cov_trace_cmp4",
"__sanitizer_cov_trace_cmp8",
"__sanitizer_cov_trace_switch",
/* KMSAN */
"kmsan_copy_to_user",
"kmsan_report",
"kmsan_unpoison_entry_regs",
"kmsan_unpoison_memory",
"__msan_chain_origin",
"__msan_get_context_state",
"__msan_instrument_asm_store",
"__msan_metadata_ptr_for_load_1",
"__msan_metadata_ptr_for_load_2",
"__msan_metadata_ptr_for_load_4",
"__msan_metadata_ptr_for_load_8",
"__msan_metadata_ptr_for_load_n",
"__msan_metadata_ptr_for_store_1",
"__msan_metadata_ptr_for_store_2",
"__msan_metadata_ptr_for_store_4",
"__msan_metadata_ptr_for_store_8",
"__msan_metadata_ptr_for_store_n",
"__msan_poison_alloca",
"__msan_warning",
/* UBSAN */
"ubsan_type_mismatch_common",
"__ubsan_handle_type_mismatch",
"__ubsan_handle_type_mismatch_v1",
"__ubsan_handle_shift_out_of_bounds",
"__ubsan_handle_load_invalid_value",
/* STACKLEAK */
"stackleak_track_stack",
/* misc */
"csum_partial_copy_generic",
"copy_mc_fragile",
"copy_mc_fragile_handle_tail",
"copy_mc_enhanced_fast_string",
"ftrace_likely_update", /* CONFIG_TRACE_BRANCH_PROFILING */
"rep_stos_alternative",
"rep_movs_alternative",
"__copy_user_nocache",
NULL
};
static void add_uaccess_safe(struct objtool_file *file)
{
struct symbol *func;
const char **name;
if (!opts.uaccess)
return;
for (name = uaccess_safe_builtin; *name; name++) {
func = find_symbol_by_name(file->elf, *name);
if (!func)
continue;
func->uaccess_safe = true;
}
}
/*
* FIXME: For now, just ignore any alternatives which add retpolines. This is
* a temporary hack, as it doesn't allow ORC to unwind from inside a retpoline.
* But it at least allows objtool to understand the control flow *around* the
* retpoline.
*/
static int add_ignore_alternatives(struct objtool_file *file)
{
struct section *rsec;
struct reloc *reloc;
struct instruction *insn;
rsec = find_section_by_name(file->elf, ".rela.discard.ignore_alts");
if (!rsec)
return 0;
for_each_reloc(rsec, reloc) {
if (reloc->sym->type != STT_SECTION) {
WARN("unexpected relocation symbol type in %s", rsec->name);
return -1;
}
insn = find_insn(file, reloc->sym->sec, reloc_addend(reloc));
if (!insn) {
WARN("bad .discard.ignore_alts entry");
return -1;
}
insn->ignore_alts = true;
}
return 0;
}
/*
 * Symbols that replace INSN_CALL_DYNAMIC; every (tail) call to such a symbol
* will be added to the .retpoline_sites section.
*/
__weak bool arch_is_retpoline(struct symbol *sym)
{
return false;
}
/*
 * Symbols that replace INSN_RETURN; every (tail) call to such a symbol
* will be added to the .return_sites section.
*/
__weak bool arch_is_rethunk(struct symbol *sym)
{
return false;
}
/*
* Symbols that are embedded inside other instructions, because sometimes crazy
* code exists. These are mostly ignored for validation purposes.
*/
__weak bool arch_is_embedded_insn(struct symbol *sym)
{
return false;
}
static struct reloc *insn_reloc(struct objtool_file *file, struct instruction *insn)
{
struct reloc *reloc;
if (insn->no_reloc)
return NULL;
if (!file)
return NULL;
reloc = find_reloc_by_dest_range(file->elf, insn->sec,
insn->offset, insn->len);
if (!reloc) {
insn->no_reloc = 1;
return NULL;
}
return reloc;
}
static void remove_insn_ops(struct instruction *insn)
{
struct stack_op *op, *next;
for (op = insn->stack_ops; op; op = next) {
next = op->next;
free(op);
}
insn->stack_ops = NULL;
}
static void annotate_call_site(struct objtool_file *file,
struct instruction *insn, bool sibling)
{
struct reloc *reloc = insn_reloc(file, insn);
struct symbol *sym = insn_call_dest(insn);
if (!sym)
sym = reloc->sym;
/*
* Alternative replacement code is just template code which is
* sometimes copied to the original instruction. For now, don't
* annotate it. (In the future we might consider annotating the
* original instruction if/when it ever makes sense to do so.)
*/
if (!strcmp(insn->sec->name, ".altinstr_replacement"))
return;
if (sym->static_call_tramp) {
list_add_tail(&insn->call_node, &file->static_call_list);
return;
}
if (sym->retpoline_thunk) {
list_add_tail(&insn->call_node, &file->retpoline_call_list);
return;
}
/*
	 * Many compilers cannot disable KCOV or sanitizer calls with a function
	 * attribute, so they need a little help: NOP out any such calls from
	 * noinstr text.
*/
if (opts.hack_noinstr && insn->sec->noinstr && sym->profiling_func) {
if (reloc)
set_reloc_type(file->elf, reloc, R_NONE);
elf_write_insn(file->elf, insn->sec,
insn->offset, insn->len,
sibling ? arch_ret_insn(insn->len)
: arch_nop_insn(insn->len));
insn->type = sibling ? INSN_RETURN : INSN_NOP;
if (sibling) {
/*
* We've replaced the tail-call JMP insn by two new
* insn: RET; INT3, except we only have a single struct
* insn here. Mark it retpoline_safe to avoid the SLS
* warning, instead of adding another insn.
*/
insn->retpoline_safe = true;
}
return;
}
if (opts.mcount && sym->fentry) {
if (sibling)
WARN_INSN(insn, "tail call to __fentry__ !?!?");
if (opts.mnop) {
if (reloc)
set_reloc_type(file->elf, reloc, R_NONE);
elf_write_insn(file->elf, insn->sec,
insn->offset, insn->len,
arch_nop_insn(insn->len));
insn->type = INSN_NOP;
}
list_add_tail(&insn->call_node, &file->mcount_loc_list);
return;
}
if (insn->type == INSN_CALL && !insn->sec->init)
list_add_tail(&insn->call_node, &file->call_list);
if (!sibling && dead_end_function(file, sym))
insn->dead_end = true;
}
static void add_call_dest(struct objtool_file *file, struct instruction *insn,
struct symbol *dest, bool sibling)
{
insn->_call_dest = dest;
if (!dest)
return;
/*
* Whatever stack impact regular CALLs have, should be undone
* by the RETURN of the called function.
*
* Annotated intra-function calls retain the stack_ops but
* are converted to JUMP, see read_intra_function_calls().
*/
remove_insn_ops(insn);
annotate_call_site(file, insn, sibling);
}
static void add_retpoline_call(struct objtool_file *file, struct instruction *insn)
{
/*
* Retpoline calls/jumps are really dynamic calls/jumps in disguise,
* so convert them accordingly.
*/
switch (insn->type) {
case INSN_CALL:
insn->type = INSN_CALL_DYNAMIC;
break;
case INSN_JUMP_UNCONDITIONAL:
insn->type = INSN_JUMP_DYNAMIC;
break;
case INSN_JUMP_CONDITIONAL:
insn->type = INSN_JUMP_DYNAMIC_CONDITIONAL;
break;
default:
return;
}
insn->retpoline_safe = true;
/*
* Whatever stack impact regular CALLs have, should be undone
* by the RETURN of the called function.
*
* Annotated intra-function calls retain the stack_ops but
* are converted to JUMP, see read_intra_function_calls().
*/
remove_insn_ops(insn);
annotate_call_site(file, insn, false);
}
static void add_return_call(struct objtool_file *file, struct instruction *insn, bool add)
{
/*
* Return thunk tail calls are really just returns in disguise,
* so convert them accordingly.
*/
insn->type = INSN_RETURN;
insn->retpoline_safe = true;
if (add)
list_add_tail(&insn->call_node, &file->return_thunk_list);
}
static bool is_first_func_insn(struct objtool_file *file,
struct instruction *insn, struct symbol *sym)
{
if (insn->offset == sym->offset)
return true;
/* Allow direct CALL/JMP past ENDBR */
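	/*
	 * E.g. (illustrative): with IBT, functions start with a 4-byte ENDBR,
	 * so a direct branch to sym+4 still targets the "first" insn:
	 *
	 *	sym+0:	endbr64
	 *	sym+4:	...	<- also treated as the first insn
	 */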
if (opts.ibt) {
struct instruction *prev = prev_insn_same_sym(file, insn);
if (prev && prev->type == INSN_ENDBR &&
insn->offset == sym->offset + prev->len)
return true;
}
return false;
}
/*
* A sibling call is a tail-call to another symbol -- to differentiate from a
* recursive tail-call which is to the same symbol.
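 *
 * E.g. (illustrative):
 *
 *	foo: ... jmp bar	// sibling call (tail-call to another symbol)
 *	foo: ... jmp foo	// recursive tail-call, not a sibling call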
*/
static bool jump_is_sibling_call(struct objtool_file *file,
struct instruction *from, struct instruction *to)
{
struct symbol *fs = from->sym;
struct symbol *ts = to->sym;
/* Not a sibling call if from/to a symbol hole */
if (!fs || !ts)
return false;
/* Not a sibling call if not targeting the start of a symbol. */
if (!is_first_func_insn(file, to, ts))
return false;
/* Disallow sibling calls into STT_NOTYPE */
if (ts->type == STT_NOTYPE)
return false;
/* Must not be self to be a sibling */
return fs->pfunc != ts->pfunc;
}
/*
* Find the destination instructions for all jumps.
*/
static int add_jump_destinations(struct objtool_file *file)
{
struct instruction *insn, *jump_dest;
struct reloc *reloc;
struct section *dest_sec;
unsigned long dest_off;
for_each_insn(file, insn) {
if (insn->jump_dest) {
/*
* handle_group_alt() may have previously set
* 'jump_dest' for some alternatives.
*/
continue;
}
if (!is_static_jump(insn))
continue;
reloc = insn_reloc(file, insn);
if (!reloc) {
dest_sec = insn->sec;
dest_off = arch_jump_destination(insn);
} else if (reloc->sym->type == STT_SECTION) {
dest_sec = reloc->sym->sec;
dest_off = arch_dest_reloc_offset(reloc_addend(reloc));
} else if (reloc->sym->retpoline_thunk) {
add_retpoline_call(file, insn);
continue;
} else if (reloc->sym->return_thunk) {
add_return_call(file, insn, true);
continue;
} else if (insn_func(insn)) {
/*
* External sibling call or internal sibling call with
* STT_FUNC reloc.
*/
add_call_dest(file, insn, reloc->sym, true);
continue;
} else if (reloc->sym->sec->idx) {
dest_sec = reloc->sym->sec;
dest_off = reloc->sym->sym.st_value +
arch_dest_reloc_offset(reloc_addend(reloc));
} else {
/* non-func asm code jumping to another file */
continue;
}
jump_dest = find_insn(file, dest_sec, dest_off);
if (!jump_dest) {
struct symbol *sym = find_symbol_by_offset(dest_sec, dest_off);
/*
* This is a special case for retbleed_untrain_ret().
* It jumps to __x86_return_thunk(), but objtool
* can't find the thunk's starting RET
* instruction, because the RET is also in the
* middle of another instruction. Objtool only
* knows about the outer instruction.
*/
if (sym && sym->embedded_insn) {
add_return_call(file, insn, false);
continue;
}
WARN_INSN(insn, "can't find jump dest instruction at %s+0x%lx",
dest_sec->name, dest_off);
return -1;
}
/*
* Cross-function jump.
*/
if (insn_func(insn) && insn_func(jump_dest) &&
insn_func(insn) != insn_func(jump_dest)) {
/*
* For GCC 8+, create parent/child links for any cold
* subfunctions. This is _mostly_ redundant with a
* similar initialization in read_symbols().
*
* If a function has aliases, we want the *first* such
* function in the symbol table to be the subfunction's
* parent. In that case we overwrite the
* initialization done in read_symbols().
*
* However this code can't completely replace the
* read_symbols() code because this doesn't detect the
* case where the parent function's only reference to a
* subfunction is through a jump table.
*/
if (!strstr(insn_func(insn)->name, ".cold") &&
strstr(insn_func(jump_dest)->name, ".cold")) {
insn_func(insn)->cfunc = insn_func(jump_dest);
insn_func(jump_dest)->pfunc = insn_func(insn);
}
}
if (jump_is_sibling_call(file, insn, jump_dest)) {
/*
* Internal sibling call without reloc or with
* STT_SECTION reloc.
*/
add_call_dest(file, insn, insn_func(jump_dest), true);
continue;
}
insn->jump_dest = jump_dest;
}
return 0;
}
static struct symbol *find_call_destination(struct section *sec, unsigned long offset)
{
struct symbol *call_dest;
call_dest = find_func_by_offset(sec, offset);
if (!call_dest)
call_dest = find_symbol_by_offset(sec, offset);
return call_dest;
}
/*
* Find the destination instructions for all calls.
*/
static int add_call_destinations(struct objtool_file *file)
{
struct instruction *insn;
unsigned long dest_off;
struct symbol *dest;
struct reloc *reloc;
for_each_insn(file, insn) {
if (insn->type != INSN_CALL)
continue;
reloc = insn_reloc(file, insn);
if (!reloc) {
dest_off = arch_jump_destination(insn);
dest = find_call_destination(insn->sec, dest_off);
add_call_dest(file, insn, dest, false);
if (insn->ignore)
continue;
if (!insn_call_dest(insn)) {
WARN_INSN(insn, "unannotated intra-function call");
return -1;
}
if (insn_func(insn) && insn_call_dest(insn)->type != STT_FUNC) {
WARN_INSN(insn, "unsupported call to non-function");
return -1;
}
} else if (reloc->sym->type == STT_SECTION) {
dest_off = arch_dest_reloc_offset(reloc_addend(reloc));
dest = find_call_destination(reloc->sym->sec, dest_off);
if (!dest) {
WARN_INSN(insn, "can't find call dest symbol at %s+0x%lx",
reloc->sym->sec->name, dest_off);
return -1;
}
add_call_dest(file, insn, dest, false);
} else if (reloc->sym->retpoline_thunk) {
add_retpoline_call(file, insn);
} else
add_call_dest(file, insn, reloc->sym, false);
}
return 0;
}
/*
* The .alternatives section requires some extra special care over and above
* other special sections because alternatives are patched in place.
*/
static int handle_group_alt(struct objtool_file *file,
struct special_alt *special_alt,
struct instruction *orig_insn,
struct instruction **new_insn)
{
struct instruction *last_new_insn = NULL, *insn, *nop = NULL;
struct alt_group *orig_alt_group, *new_alt_group;
unsigned long dest_off;
orig_alt_group = orig_insn->alt_group;
if (!orig_alt_group) {
struct instruction *last_orig_insn = NULL;
orig_alt_group = malloc(sizeof(*orig_alt_group));
if (!orig_alt_group) {
WARN("malloc failed");
return -1;
}
orig_alt_group->cfi = calloc(special_alt->orig_len,
sizeof(struct cfi_state *));
if (!orig_alt_group->cfi) {
WARN("calloc failed");
return -1;
}
insn = orig_insn;
sec_for_each_insn_from(file, insn) {
if (insn->offset >= special_alt->orig_off + special_alt->orig_len)
break;
insn->alt_group = orig_alt_group;
last_orig_insn = insn;
}
orig_alt_group->orig_group = NULL;
orig_alt_group->first_insn = orig_insn;
orig_alt_group->last_insn = last_orig_insn;
orig_alt_group->nop = NULL;
} else {
if (orig_alt_group->last_insn->offset + orig_alt_group->last_insn->len -
orig_alt_group->first_insn->offset != special_alt->orig_len) {
WARN_INSN(orig_insn, "weirdly overlapping alternative! %ld != %d",
orig_alt_group->last_insn->offset +
orig_alt_group->last_insn->len -
orig_alt_group->first_insn->offset,
special_alt->orig_len);
return -1;
}
}
new_alt_group = malloc(sizeof(*new_alt_group));
if (!new_alt_group) {
WARN("malloc failed");
return -1;
}
if (special_alt->new_len < special_alt->orig_len) {
/*
* Insert a fake nop at the end to make the replacement
* alt_group the same size as the original. This is needed to
* allow propagate_alt_cfi() to do its magic. When the last
* instruction affects the stack, the instruction after it (the
* nop) will propagate the new state to the shared CFI array.
*/
nop = malloc(sizeof(*nop));
if (!nop) {
WARN("malloc failed");
return -1;
}
memset(nop, 0, sizeof(*nop));
nop->sec = special_alt->new_sec;
nop->offset = special_alt->new_off + special_alt->new_len;
nop->len = special_alt->orig_len - special_alt->new_len;
nop->type = INSN_NOP;
nop->sym = orig_insn->sym;
nop->alt_group = new_alt_group;
nop->ignore = orig_insn->ignore_alts;
}
if (!special_alt->new_len) {
*new_insn = nop;
goto end;
}
insn = *new_insn;
sec_for_each_insn_from(file, insn) {
struct reloc *alt_reloc;
if (insn->offset >= special_alt->new_off + special_alt->new_len)
break;
last_new_insn = insn;
insn->ignore = orig_insn->ignore_alts;
insn->sym = orig_insn->sym;
insn->alt_group = new_alt_group;
/*
* Since alternative replacement code is copy/pasted by the
* kernel after applying relocations, generally such code can't
* have relative-address relocation references to outside the
* .altinstr_replacement section, unless the arch's
* alternatives code can adjust the relative offsets
* accordingly.
*/
alt_reloc = insn_reloc(file, insn);
if (alt_reloc && arch_pc_relative_reloc(alt_reloc) &&
!arch_support_alt_relocation(special_alt, insn, alt_reloc)) {
WARN_INSN(insn, "unsupported relocation in alternatives section");
return -1;
}
if (!is_static_jump(insn))
continue;
if (!insn->immediate)
continue;
dest_off = arch_jump_destination(insn);
if (dest_off == special_alt->new_off + special_alt->new_len) {
insn->jump_dest = next_insn_same_sec(file, orig_alt_group->last_insn);
if (!insn->jump_dest) {
WARN_INSN(insn, "can't find alternative jump destination");
return -1;
}
}
}
if (!last_new_insn) {
WARN_FUNC("can't find last new alternative instruction",
special_alt->new_sec, special_alt->new_off);
return -1;
}
end:
new_alt_group->orig_group = orig_alt_group;
new_alt_group->first_insn = *new_insn;
new_alt_group->last_insn = last_new_insn;
new_alt_group->nop = nop;
new_alt_group->cfi = orig_alt_group->cfi;
return 0;
}
/*
 * A jump label entry can either convert a nop to a jump or a jump to a nop.
* If the original instruction is a jump, make the alt entry an effective nop
* by just skipping the original instruction.
*/
static int handle_jump_alt(struct objtool_file *file,
struct special_alt *special_alt,
struct instruction *orig_insn,
struct instruction **new_insn)
{
if (orig_insn->type != INSN_JUMP_UNCONDITIONAL &&
orig_insn->type != INSN_NOP) {
WARN_INSN(orig_insn, "unsupported instruction at jump label");
return -1;
}
if (opts.hack_jump_label && special_alt->key_addend & 2) {
struct reloc *reloc = insn_reloc(file, orig_insn);
if (reloc)
set_reloc_type(file->elf, reloc, R_NONE);
elf_write_insn(file->elf, orig_insn->sec,
orig_insn->offset, orig_insn->len,
arch_nop_insn(orig_insn->len));
orig_insn->type = INSN_NOP;
}
if (orig_insn->type == INSN_NOP) {
if (orig_insn->len == 2)
file->jl_nop_short++;
else
file->jl_nop_long++;
return 0;
}
if (orig_insn->len == 2)
file->jl_short++;
else
file->jl_long++;
*new_insn = next_insn_same_sec(file, orig_insn);
return 0;
}
/*
* Read all the special sections which have alternate instructions which can be
* patched in or redirected to at runtime. Each instruction having alternate
* instruction(s) has them added to its insn->alts list, which will be
* traversed in validate_branch().
*/
static int add_special_section_alts(struct objtool_file *file)
{
struct list_head special_alts;
struct instruction *orig_insn, *new_insn;
struct special_alt *special_alt, *tmp;
struct alternative *alt;
int ret;
ret = special_get_alts(file->elf, &special_alts);
if (ret)
return ret;
list_for_each_entry_safe(special_alt, tmp, &special_alts, list) {
orig_insn = find_insn(file, special_alt->orig_sec,
special_alt->orig_off);
if (!orig_insn) {
WARN_FUNC("special: can't find orig instruction",
special_alt->orig_sec, special_alt->orig_off);
ret = -1;
goto out;
}
new_insn = NULL;
if (!special_alt->group || special_alt->new_len) {
new_insn = find_insn(file, special_alt->new_sec,
special_alt->new_off);
if (!new_insn) {
WARN_FUNC("special: can't find new instruction",
special_alt->new_sec,
special_alt->new_off);
ret = -1;
goto out;
}
}
if (special_alt->group) {
if (!special_alt->orig_len) {
WARN_INSN(orig_insn, "empty alternative entry");
continue;
}
ret = handle_group_alt(file, special_alt, orig_insn,
&new_insn);
if (ret)
goto out;
} else if (special_alt->jump_or_nop) {
ret = handle_jump_alt(file, special_alt, orig_insn,
&new_insn);
if (ret)
goto out;
}
alt = malloc(sizeof(*alt));
if (!alt) {
WARN("malloc failed");
ret = -1;
goto out;
}
alt->insn = new_insn;
alt->skip_orig = special_alt->skip_orig;
orig_insn->ignore_alts |= special_alt->skip_alt;
alt->next = orig_insn->alts;
orig_insn->alts = alt;
list_del(&special_alt->list);
free(special_alt);
}
if (opts.stats) {
printf("jl\\\tNOP\tJMP\n");
printf("short:\t%ld\t%ld\n", file->jl_nop_short, file->jl_short);
printf("long:\t%ld\t%ld\n", file->jl_nop_long, file->jl_long);
}
out:
return ret;
}
static int add_jump_table(struct objtool_file *file, struct instruction *insn,
struct reloc *next_table)
{
struct symbol *pfunc = insn_func(insn)->pfunc;
struct reloc *table = insn_jump_table(insn);
struct instruction *dest_insn;
unsigned int prev_offset = 0;
struct reloc *reloc = table;
struct alternative *alt;
/*
* Each @reloc is a switch table relocation which points to the target
* instruction.
*/
for_each_reloc_from(table->sec, reloc) {
/* Check for the end of the table: */
if (reloc != table && reloc == next_table)
break;
/* Make sure the table entries are consecutive: */
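		/* (entries are assumed to be 8 bytes: 64-bit absolute addresses) */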
if (prev_offset && reloc_offset(reloc) != prev_offset + 8)
break;
/* Detect function pointers from contiguous objects: */
if (reloc->sym->sec == pfunc->sec &&
reloc_addend(reloc) == pfunc->offset)
break;
dest_insn = find_insn(file, reloc->sym->sec, reloc_addend(reloc));
if (!dest_insn)
break;
/* Make sure the destination is in the same function: */
if (!insn_func(dest_insn) || insn_func(dest_insn)->pfunc != pfunc)
break;
alt = malloc(sizeof(*alt));
if (!alt) {
WARN("malloc failed");
return -1;
}
alt->insn = dest_insn;
alt->next = insn->alts;
insn->alts = alt;
prev_offset = reloc_offset(reloc);
}
if (!prev_offset) {
WARN_INSN(insn, "can't find switch jump table");
return -1;
}
return 0;
}
/*
* find_jump_table() - Given a dynamic jump, find the switch jump table
* associated with it.
*/
static struct reloc *find_jump_table(struct objtool_file *file,
struct symbol *func,
struct instruction *insn)
{
struct reloc *table_reloc;
struct instruction *dest_insn, *orig_insn = insn;
/*
	 * Backward search using the @first_jump_src links; these help avoid
	 * much of the 'in between' code, which could otherwise confuse the
	 * search.
*/
for (;
insn && insn_func(insn) && insn_func(insn)->pfunc == func;
insn = insn->first_jump_src ?: prev_insn_same_sym(file, insn)) {
if (insn != orig_insn && insn->type == INSN_JUMP_DYNAMIC)
break;
/* allow small jumps within the range */
if (insn->type == INSN_JUMP_UNCONDITIONAL &&
insn->jump_dest &&
(insn->jump_dest->offset <= insn->offset ||
insn->jump_dest->offset > orig_insn->offset))
break;
table_reloc = arch_find_switch_table(file, insn);
if (!table_reloc)
continue;
dest_insn = find_insn(file, table_reloc->sym->sec, reloc_addend(table_reloc));
if (!dest_insn || !insn_func(dest_insn) || insn_func(dest_insn)->pfunc != func)
continue;
return table_reloc;
}
return NULL;
}
/*
* First pass: Mark the head of each jump table so that in the next pass,
* we know when a given jump table ends and the next one starts.
*/
static void mark_func_jump_tables(struct objtool_file *file,
struct symbol *func)
{
struct instruction *insn, *last = NULL;
struct reloc *reloc;
func_for_each_insn(file, func, insn) {
if (!last)
last = insn;
/*
* Store back-pointers for unconditional forward jumps such
* that find_jump_table() can back-track using those and
* avoid some potentially confusing code.
*/
if (insn->type == INSN_JUMP_UNCONDITIONAL && insn->jump_dest &&
insn->offset > last->offset &&
insn->jump_dest->offset > insn->offset &&
!insn->jump_dest->first_jump_src) {
insn->jump_dest->first_jump_src = insn;
last = insn->jump_dest;
}
if (insn->type != INSN_JUMP_DYNAMIC)
continue;
reloc = find_jump_table(file, func, insn);
if (reloc)
insn->_jump_table = reloc;
}
}
static int add_func_jump_tables(struct objtool_file *file,
struct symbol *func)
{
struct instruction *insn, *insn_t1 = NULL, *insn_t2;
int ret = 0;
func_for_each_insn(file, func, insn) {
if (!insn_jump_table(insn))
continue;
if (!insn_t1) {
insn_t1 = insn;
continue;
}
insn_t2 = insn;
ret = add_jump_table(file, insn_t1, insn_jump_table(insn_t2));
if (ret)
return ret;
insn_t1 = insn_t2;
}
if (insn_t1)
ret = add_jump_table(file, insn_t1, NULL);
return ret;
}
/*
* For some switch statements, gcc generates a jump table in the .rodata
* section which contains a list of addresses within the function to jump to.
* This finds these jump tables and adds them to the insn->alts lists.
*/
static int add_jump_table_alts(struct objtool_file *file)
{
struct symbol *func;
int ret;
if (!file->rodata)
return 0;
for_each_sym(file, func) {
if (func->type != STT_FUNC)
continue;
mark_func_jump_tables(file, func);
ret = add_func_jump_tables(file, func);
if (ret)
return ret;
}
return 0;
}
static void set_func_state(struct cfi_state *state)
{
state->cfa = initial_func_cfi.cfa;
memcpy(&state->regs, &initial_func_cfi.regs,
CFI_NUM_REGS * sizeof(struct cfi_reg));
state->stack_size = initial_func_cfi.cfa.offset;
state->type = UNWIND_HINT_TYPE_CALL;
}
static int read_unwind_hints(struct objtool_file *file)
{
struct cfi_state cfi = init_cfi;
struct section *sec;
struct unwind_hint *hint;
struct instruction *insn;
struct reloc *reloc;
int i;
sec = find_section_by_name(file->elf, ".discard.unwind_hints");
if (!sec)
return 0;
if (!sec->rsec) {
WARN("missing .rela.discard.unwind_hints section");
return -1;
}
if (sec->sh.sh_size % sizeof(struct unwind_hint)) {
WARN("struct unwind_hint size mismatch");
return -1;
}
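	/*
	 * Each entry pairs an ip reloc with a struct unwind_hint
	 * (illustrative; see include/linux/objtool_types.h):
	 *
	 *	struct unwind_hint {
	 *		u32 ip;
	 *		s16 sp_offset;
	 *		u8  sp_reg;
	 *		u8  type;
	 *		u8  signal;
	 *	};
	 */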
file->hints = true;
for (i = 0; i < sec->sh.sh_size / sizeof(struct unwind_hint); i++) {
hint = (struct unwind_hint *)sec->data->d_buf + i;
reloc = find_reloc_by_dest(file->elf, sec, i * sizeof(*hint));
if (!reloc) {
WARN("can't find reloc for unwind_hints[%d]", i);
return -1;
}
insn = find_insn(file, reloc->sym->sec, reloc_addend(reloc));
if (!insn) {
WARN("can't find insn for unwind_hints[%d]", i);
return -1;
}
insn->hint = true;
if (hint->type == UNWIND_HINT_TYPE_UNDEFINED) {
insn->cfi = &force_undefined_cfi;
continue;
}
if (hint->type == UNWIND_HINT_TYPE_SAVE) {
insn->hint = false;
insn->save = true;
continue;
}
if (hint->type == UNWIND_HINT_TYPE_RESTORE) {
insn->restore = true;
continue;
}
if (hint->type == UNWIND_HINT_TYPE_REGS_PARTIAL) {
struct symbol *sym = find_symbol_by_offset(insn->sec, insn->offset);
if (sym && sym->bind == STB_GLOBAL) {
if (opts.ibt && insn->type != INSN_ENDBR && !insn->noendbr) {
WARN_INSN(insn, "UNWIND_HINT_IRET_REGS without ENDBR");
}
}
}
if (hint->type == UNWIND_HINT_TYPE_FUNC) {
insn->cfi = &func_cfi;
continue;
}
if (insn->cfi)
cfi = *(insn->cfi);
if (arch_decode_hint_reg(hint->sp_reg, &cfi.cfa.base)) {
WARN_INSN(insn, "unsupported unwind_hint sp base reg %d", hint->sp_reg);
return -1;
}
cfi.cfa.offset = bswap_if_needed(file->elf, hint->sp_offset);
cfi.type = hint->type;
cfi.signal = hint->signal;
insn->cfi = cfi_hash_find_or_add(&cfi);
}
return 0;
}
static int read_noendbr_hints(struct objtool_file *file)
{
struct instruction *insn;
struct section *rsec;
struct reloc *reloc;
rsec = find_section_by_name(file->elf, ".rela.discard.noendbr");
if (!rsec)
return 0;
for_each_reloc(rsec, reloc) {
insn = find_insn(file, reloc->sym->sec,
reloc->sym->offset + reloc_addend(reloc));
if (!insn) {
WARN("bad .discard.noendbr entry");
return -1;
}
insn->noendbr = 1;
}
return 0;
}
static int read_retpoline_hints(struct objtool_file *file)
{
struct section *rsec;
struct instruction *insn;
struct reloc *reloc;
rsec = find_section_by_name(file->elf, ".rela.discard.retpoline_safe");
if (!rsec)
return 0;
for_each_reloc(rsec, reloc) {
if (reloc->sym->type != STT_SECTION) {
WARN("unexpected relocation symbol type in %s", rsec->name);
return -1;
}
insn = find_insn(file, reloc->sym->sec, reloc_addend(reloc));
if (!insn) {
WARN("bad .discard.retpoline_safe entry");
return -1;
}
if (insn->type != INSN_JUMP_DYNAMIC &&
insn->type != INSN_CALL_DYNAMIC &&
insn->type != INSN_RETURN &&
insn->type != INSN_NOP) {
WARN_INSN(insn, "retpoline_safe hint not an indirect jump/call/ret/nop");
return -1;
}
insn->retpoline_safe = true;
}
return 0;
}
static int read_instr_hints(struct objtool_file *file)
{
struct section *rsec;
struct instruction *insn;
struct reloc *reloc;
rsec = find_section_by_name(file->elf, ".rela.discard.instr_end");
if (!rsec)
return 0;
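	/*
	 * insn->instr acts as a nesting counter: instrumentation_begin()
	 * emits a .discard.instr_begin entry (counter++) and
	 * instrumentation_end() a .discard.instr_end entry (counter--);
	 * see include/linux/instrumentation.h (illustrative summary).
	 */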
for_each_reloc(rsec, reloc) {
if (reloc->sym->type != STT_SECTION) {
WARN("unexpected relocation symbol type in %s", rsec->name);
return -1;
}
insn = find_insn(file, reloc->sym->sec, reloc_addend(reloc));
if (!insn) {
WARN("bad .discard.instr_end entry");
return -1;
}
insn->instr--;
}
rsec = find_section_by_name(file->elf, ".rela.discard.instr_begin");
if (!rsec)
return 0;
for_each_reloc(rsec, reloc) {
if (reloc->sym->type != STT_SECTION) {
WARN("unexpected relocation symbol type in %s", rsec->name);
return -1;
}
insn = find_insn(file, reloc->sym->sec, reloc_addend(reloc));
if (!insn) {
WARN("bad .discard.instr_begin entry");
return -1;
}
insn->instr++;
}
return 0;
}
static int read_validate_unret_hints(struct objtool_file *file)
{
struct section *rsec;
struct instruction *insn;
struct reloc *reloc;
rsec = find_section_by_name(file->elf, ".rela.discard.validate_unret");
if (!rsec)
return 0;
for_each_reloc(rsec, reloc) {
if (reloc->sym->type != STT_SECTION) {
WARN("unexpected relocation symbol type in %s", rsec->name);
return -1;
}
insn = find_insn(file, reloc->sym->sec, reloc_addend(reloc));
if (!insn) {
WARN("bad .discard.instr_end entry");
return -1;
}
insn->unret = 1;
}
return 0;
}
static int read_intra_function_calls(struct objtool_file *file)
{
struct instruction *insn;
struct section *rsec;
struct reloc *reloc;
rsec = find_section_by_name(file->elf, ".rela.discard.intra_function_calls");
if (!rsec)
return 0;
for_each_reloc(rsec, reloc) {
unsigned long dest_off;
if (reloc->sym->type != STT_SECTION) {
WARN("unexpected relocation symbol type in %s",
rsec->name);
return -1;
}
insn = find_insn(file, reloc->sym->sec, reloc_addend(reloc));
if (!insn) {
WARN("bad .discard.intra_function_call entry");
return -1;
}
if (insn->type != INSN_CALL) {
WARN_INSN(insn, "intra_function_call not a direct call");
return -1;
}
/*
* Treat intra-function CALLs as JMPs, but with a stack_op.
* See add_call_destinations(), which strips stack_ops from
* normal CALLs.
*/
insn->type = INSN_JUMP_UNCONDITIONAL;
dest_off = arch_jump_destination(insn);
insn->jump_dest = find_insn(file, insn->sec, dest_off);
if (!insn->jump_dest) {
WARN_INSN(insn, "can't find call dest at %s+0x%lx",
insn->sec->name, dest_off);
return -1;
}
}
return 0;
}
/*
* Return true if name matches an instrumentation function, where calls to that
* function from noinstr code can safely be removed, but compilers won't do so.
*/
static bool is_profiling_func(const char *name)
{
/*
* Many compilers cannot disable KCOV with a function attribute.
*/
if (!strncmp(name, "__sanitizer_cov_", 16))
return true;
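	/* 16 == strlen("__sanitizer_cov_"); the 12 below == strlen("__tsan_func_") */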
/*
* Some compilers currently do not remove __tsan_func_entry/exit nor
* __tsan_atomic_signal_fence (used for barrier instrumentation) with
	 * the __no_sanitize_thread attribute, so remove them here. Once the kernel's
* minimum Clang version is 14.0, this can be removed.
*/
if (!strncmp(name, "__tsan_func_", 12) ||
!strcmp(name, "__tsan_atomic_signal_fence"))
return true;
return false;
}
static int classify_symbols(struct objtool_file *file)
{
struct symbol *func;
for_each_sym(file, func) {
if (func->bind != STB_GLOBAL)
continue;
if (!strncmp(func->name, STATIC_CALL_TRAMP_PREFIX_STR,
strlen(STATIC_CALL_TRAMP_PREFIX_STR)))
func->static_call_tramp = true;
if (arch_is_retpoline(func))
func->retpoline_thunk = true;
if (arch_is_rethunk(func))
func->return_thunk = true;
if (arch_is_embedded_insn(func))
func->embedded_insn = true;
if (arch_ftrace_match(func->name))
func->fentry = true;
if (is_profiling_func(func->name))
func->profiling_func = true;
}
return 0;
}
static void mark_rodata(struct objtool_file *file)
{
struct section *sec;
bool found = false;
/*
* Search for the following rodata sections, each of which can
* potentially contain jump tables:
*
* - .rodata: can contain GCC switch tables
* - .rodata.<func>: same, if -fdata-sections is being used
* - .rodata..c_jump_table: contains C annotated jump tables
*
* .rodata.str1.* sections are ignored; they don't contain jump tables.
*/
for_each_sec(file, sec) {
if (!strncmp(sec->name, ".rodata", 7) &&
!strstr(sec->name, ".str1.")) {
sec->rodata = true;
found = true;
}
}
file->rodata = found;
}
static int decode_sections(struct objtool_file *file)
{
int ret;
mark_rodata(file);
ret = init_pv_ops(file);
if (ret)
return ret;
/*
	 * Must be before add_{jump,call}_destinations().
*/
ret = classify_symbols(file);
if (ret)
return ret;
ret = decode_instructions(file);
if (ret)
return ret;
add_ignores(file);
add_uaccess_safe(file);
ret = add_ignore_alternatives(file);
if (ret)
return ret;
/*
* Must be before read_unwind_hints() since that needs insn->noendbr.
*/
ret = read_noendbr_hints(file);
if (ret)
return ret;
/*
* Must be before add_jump_destinations(), which depends on 'func'
* being set for alternatives, to enable proper sibling call detection.
*/
if (opts.stackval || opts.orc || opts.uaccess || opts.noinstr) {
ret = add_special_section_alts(file);
if (ret)
return ret;
}
ret = add_jump_destinations(file);
if (ret)
return ret;
/*
	 * Must be before add_call_destinations(); it changes INSN_CALL to
* INSN_JUMP.
*/
ret = read_intra_function_calls(file);
if (ret)
return ret;
ret = add_call_destinations(file);
if (ret)
return ret;
/*
* Must be after add_call_destinations() such that it can override
* dead_end_function() marks.
*/
ret = add_dead_ends(file);
if (ret)
return ret;
ret = add_jump_table_alts(file);
if (ret)
return ret;
ret = read_unwind_hints(file);
if (ret)
return ret;
ret = read_retpoline_hints(file);
if (ret)
return ret;
ret = read_instr_hints(file);
if (ret)
return ret;
ret = read_validate_unret_hints(file);
if (ret)
return ret;
return 0;
}
static bool is_special_call(struct instruction *insn)
{
if (insn->type == INSN_CALL) {
struct symbol *dest = insn_call_dest(insn);
if (!dest)
return false;
if (dest->fentry || dest->embedded_insn)
return true;
}
return false;
}
static bool has_modified_stack_frame(struct instruction *insn, struct insn_state *state)
{
struct cfi_state *cfi = &state->cfi;
int i;
if (cfi->cfa.base != initial_func_cfi.cfa.base || cfi->drap)
return true;
if (cfi->cfa.offset != initial_func_cfi.cfa.offset)
return true;
if (cfi->stack_size != initial_func_cfi.cfa.offset)
return true;
for (i = 0; i < CFI_NUM_REGS; i++) {
if (cfi->regs[i].base != initial_func_cfi.regs[i].base ||
cfi->regs[i].offset != initial_func_cfi.regs[i].offset)
return true;
}
return false;
}
static bool check_reg_frame_pos(const struct cfi_reg *reg,
int expected_offset)
{
return reg->base == CFI_CFA &&
reg->offset == expected_offset;
}
static bool has_valid_stack_frame(struct insn_state *state)
{
struct cfi_state *cfi = &state->cfi;
if (cfi->cfa.base == CFI_BP &&
check_reg_frame_pos(&cfi->regs[CFI_BP], -cfi->cfa.offset) &&
check_reg_frame_pos(&cfi->regs[CFI_RA], -cfi->cfa.offset + 8))
return true;
if (cfi->drap && cfi->regs[CFI_BP].base == CFI_BP)
return true;
return false;
}
static int update_cfi_state_regs(struct instruction *insn,
struct cfi_state *cfi,
struct stack_op *op)
{
struct cfi_reg *cfa = &cfi->cfa;
if (cfa->base != CFI_SP && cfa->base != CFI_SP_INDIRECT)
return 0;
/* push */
if (op->dest.type == OP_DEST_PUSH || op->dest.type == OP_DEST_PUSHF)
cfa->offset += 8;
/* pop */
if (op->src.type == OP_SRC_POP || op->src.type == OP_SRC_POPF)
cfa->offset -= 8;
/* add immediate to sp */
if (op->dest.type == OP_DEST_REG && op->src.type == OP_SRC_ADD &&
op->dest.reg == CFI_SP && op->src.reg == CFI_SP)
cfa->offset -= op->src.offset;
return 0;
}
static void save_reg(struct cfi_state *cfi, unsigned char reg, int base, int offset)
{
if (arch_callee_saved_reg(reg) &&
cfi->regs[reg].base == CFI_UNDEFINED) {
cfi->regs[reg].base = base;
cfi->regs[reg].offset = offset;
}
}
static void restore_reg(struct cfi_state *cfi, unsigned char reg)
{
cfi->regs[reg].base = initial_func_cfi.regs[reg].base;
cfi->regs[reg].offset = initial_func_cfi.regs[reg].offset;
}
/*
* A note about DRAP stack alignment:
*
* GCC has the concept of a DRAP register, which is used to help keep track of
* the stack pointer when aligning the stack. r10 or r13 is used as the DRAP
* register. The typical DRAP pattern is:
*
* 4c 8d 54 24 08 lea 0x8(%rsp),%r10
* 48 83 e4 c0 and $0xffffffffffffffc0,%rsp
* 41 ff 72 f8 pushq -0x8(%r10)
* 55 push %rbp
* 48 89 e5 mov %rsp,%rbp
* (more pushes)
* 41 52 push %r10
* ...
* 41 5a pop %r10
* (more pops)
* 5d pop %rbp
* 49 8d 62 f8 lea -0x8(%r10),%rsp
* c3 retq
*
* There are some variations in the epilogues, like:
*
* 5b pop %rbx
* 41 5a pop %r10
* 41 5c pop %r12
* 41 5d pop %r13
* 41 5e pop %r14
* c9 leaveq
* 49 8d 62 f8 lea -0x8(%r10),%rsp
* c3 retq
*
* and:
*
* 4c 8b 55 e8 mov -0x18(%rbp),%r10
* 48 8b 5d e0 mov -0x20(%rbp),%rbx
* 4c 8b 65 f0 mov -0x10(%rbp),%r12
* 4c 8b 6d f8 mov -0x8(%rbp),%r13
* c9 leaveq
* 49 8d 62 f8 lea -0x8(%r10),%rsp
* c3 retq
*
* Sometimes r13 is used as the DRAP register, in which case it's saved and
* restored beforehand:
*
* 41 55 push %r13
* 4c 8d 6c 24 10 lea 0x10(%rsp),%r13
* 48 83 e4 f0 and $0xfffffffffffffff0,%rsp
* ...
* 49 8d 65 f0 lea -0x10(%r13),%rsp
* 41 5d pop %r13
* c3 retq
*/
static int update_cfi_state(struct instruction *insn,
struct instruction *next_insn,
struct cfi_state *cfi, struct stack_op *op)
{
struct cfi_reg *cfa = &cfi->cfa;
struct cfi_reg *regs = cfi->regs;
/* ignore UNWIND_HINT_UNDEFINED regions */
if (cfi->force_undefined)
return 0;
/* stack operations don't make sense with an undefined CFA */
if (cfa->base == CFI_UNDEFINED) {
if (insn_func(insn)) {
WARN_INSN(insn, "undefined stack state");
return -1;
}
return 0;
}
if (cfi->type == UNWIND_HINT_TYPE_REGS ||
cfi->type == UNWIND_HINT_TYPE_REGS_PARTIAL)
return update_cfi_state_regs(insn, cfi, op);
switch (op->dest.type) {
case OP_DEST_REG:
switch (op->src.type) {
case OP_SRC_REG:
if (op->src.reg == CFI_SP && op->dest.reg == CFI_BP &&
cfa->base == CFI_SP &&
check_reg_frame_pos(®s[CFI_BP], -cfa->offset)) {
/* mov %rsp, %rbp */
cfa->base = op->dest.reg;
cfi->bp_scratch = false;
}
else if (op->src.reg == CFI_SP &&
op->dest.reg == CFI_BP && cfi->drap) {
/* drap: mov %rsp, %rbp */
regs[CFI_BP].base = CFI_BP;
regs[CFI_BP].offset = -cfi->stack_size;
cfi->bp_scratch = false;
}
else if (op->src.reg == CFI_SP && cfa->base == CFI_SP) {
/*
* mov %rsp, %reg
*
* This is needed for the rare case where GCC
* does:
*
* mov %rsp, %rax
* ...
* mov %rax, %rsp
*/
cfi->vals[op->dest.reg].base = CFI_CFA;
cfi->vals[op->dest.reg].offset = -cfi->stack_size;
}
else if (op->src.reg == CFI_BP && op->dest.reg == CFI_SP &&
(cfa->base == CFI_BP || cfa->base == cfi->drap_reg)) {
/*
* mov %rbp, %rsp
*
* Restore the original stack pointer (Clang).
*/
cfi->stack_size = -cfi->regs[CFI_BP].offset;
}
else if (op->dest.reg == cfa->base) {
/* mov %reg, %rsp */
if (cfa->base == CFI_SP &&
cfi->vals[op->src.reg].base == CFI_CFA) {
/*
* This is needed for the rare case
* where GCC does something dumb like:
*
* lea 0x8(%rsp), %rcx
* ...
* mov %rcx, %rsp
*/
cfa->offset = -cfi->vals[op->src.reg].offset;
cfi->stack_size = cfa->offset;
} else if (cfa->base == CFI_SP &&
cfi->vals[op->src.reg].base == CFI_SP_INDIRECT &&
cfi->vals[op->src.reg].offset == cfa->offset) {
/*
* Stack swizzle:
*
* 1: mov %rsp, (%[tos])
* 2: mov %[tos], %rsp
* ...
* 3: pop %rsp
*
* Where:
*
* 1 - places a pointer to the previous
* stack at the Top-of-Stack of the
* new stack.
*
* 2 - switches to the new stack.
*
* 3 - pops the Top-of-Stack to restore
* the original stack.
*
* Note: we set base to SP_INDIRECT
* here and preserve offset. Therefore
* when the unwinder reaches ToS it
* will dereference SP and then add the
* offset to find the next frame, IOW:
* (%rsp) + offset.
*/
cfa->base = CFI_SP_INDIRECT;
} else {
cfa->base = CFI_UNDEFINED;
cfa->offset = 0;
}
}
else if (op->dest.reg == CFI_SP &&
cfi->vals[op->src.reg].base == CFI_SP_INDIRECT &&
cfi->vals[op->src.reg].offset == cfa->offset) {
/*
* The same stack swizzle case 2) as above. But
* because we can't change cfa->base, case 3)
* will become a regular POP. Pretend we're a
* PUSH so things don't go unbalanced.
*/
cfi->stack_size += 8;
}
break;
case OP_SRC_ADD:
if (op->dest.reg == CFI_SP && op->src.reg == CFI_SP) {
/* add imm, %rsp */
cfi->stack_size -= op->src.offset;
if (cfa->base == CFI_SP)
cfa->offset -= op->src.offset;
break;
}
if (op->dest.reg == CFI_SP && op->src.reg == CFI_BP) {
/* lea disp(%rbp), %rsp */
cfi->stack_size = -(op->src.offset + regs[CFI_BP].offset);
break;
}
if (op->src.reg == CFI_SP && cfa->base == CFI_SP) {
/* drap: lea disp(%rsp), %drap */
cfi->drap_reg = op->dest.reg;
/*
* lea disp(%rsp), %reg
*
* This is needed for the rare case where GCC
* does something dumb like:
*
* lea 0x8(%rsp), %rcx
* ...
* mov %rcx, %rsp
*/
cfi->vals[op->dest.reg].base = CFI_CFA;
				cfi->vals[op->dest.reg].offset =
					-cfi->stack_size + op->src.offset;
break;
}
if (cfi->drap && op->dest.reg == CFI_SP &&
op->src.reg == cfi->drap_reg) {
/* drap: lea disp(%drap), %rsp */
cfa->base = CFI_SP;
cfa->offset = cfi->stack_size = -op->src.offset;
cfi->drap_reg = CFI_UNDEFINED;
cfi->drap = false;
break;
}
if (op->dest.reg == cfi->cfa.base && !(next_insn && next_insn->hint)) {
WARN_INSN(insn, "unsupported stack register modification");
return -1;
}
break;
case OP_SRC_AND:
if (op->dest.reg != CFI_SP ||
(cfi->drap_reg != CFI_UNDEFINED && cfa->base != CFI_SP) ||
(cfi->drap_reg == CFI_UNDEFINED && cfa->base != CFI_BP)) {
WARN_INSN(insn, "unsupported stack pointer realignment");
return -1;
}
if (cfi->drap_reg != CFI_UNDEFINED) {
/* drap: and imm, %rsp */
cfa->base = cfi->drap_reg;
cfa->offset = cfi->stack_size = 0;
cfi->drap = true;
}
/*
* Older versions of GCC (4.8ish) realign the stack
* without DRAP, with a frame pointer.
*/
break;
case OP_SRC_POP:
case OP_SRC_POPF:
if (op->dest.reg == CFI_SP && cfa->base == CFI_SP_INDIRECT) {
/* pop %rsp; # restore from a stack swizzle */
cfa->base = CFI_SP;
break;
}
if (!cfi->drap && op->dest.reg == cfa->base) {
/* pop %rbp */
cfa->base = CFI_SP;
}
if (cfi->drap && cfa->base == CFI_BP_INDIRECT &&
op->dest.reg == cfi->drap_reg &&
cfi->drap_offset == -cfi->stack_size) {
/* drap: pop %drap */
cfa->base = cfi->drap_reg;
cfa->offset = 0;
cfi->drap_offset = -1;
} else if (cfi->stack_size == -regs[op->dest.reg].offset) {
/* pop %reg */
restore_reg(cfi, op->dest.reg);
}
cfi->stack_size -= 8;
if (cfa->base == CFI_SP)
cfa->offset -= 8;
break;
case OP_SRC_REG_INDIRECT:
if (!cfi->drap && op->dest.reg == cfa->base &&
op->dest.reg == CFI_BP) {
/* mov disp(%rsp), %rbp */
cfa->base = CFI_SP;
cfa->offset = cfi->stack_size;
}
if (cfi->drap && op->src.reg == CFI_BP &&
op->src.offset == cfi->drap_offset) {
/* drap: mov disp(%rbp), %drap */
cfa->base = cfi->drap_reg;
cfa->offset = 0;
cfi->drap_offset = -1;
}
if (cfi->drap && op->src.reg == CFI_BP &&
op->src.offset == regs[op->dest.reg].offset) {
/* drap: mov disp(%rbp), %reg */
restore_reg(cfi, op->dest.reg);
} else if (op->src.reg == cfa->base &&
op->src.offset == regs[op->dest.reg].offset + cfa->offset) {
/* mov disp(%rbp), %reg */
/* mov disp(%rsp), %reg */
restore_reg(cfi, op->dest.reg);
} else if (op->src.reg == CFI_SP &&
op->src.offset == regs[op->dest.reg].offset + cfi->stack_size) {
/* mov disp(%rsp), %reg */
restore_reg(cfi, op->dest.reg);
}
break;
default:
WARN_INSN(insn, "unknown stack-related instruction");
return -1;
}
break;
case OP_DEST_PUSH:
case OP_DEST_PUSHF:
cfi->stack_size += 8;
if (cfa->base == CFI_SP)
cfa->offset += 8;
if (op->src.type != OP_SRC_REG)
break;
if (cfi->drap) {
if (op->src.reg == cfa->base && op->src.reg == cfi->drap_reg) {
/* drap: push %drap */
cfa->base = CFI_BP_INDIRECT;
cfa->offset = -cfi->stack_size;
/* save drap so we know when to restore it */
cfi->drap_offset = -cfi->stack_size;
} else if (op->src.reg == CFI_BP && cfa->base == cfi->drap_reg) {
/* drap: push %rbp */
cfi->stack_size = 0;
} else {
/* drap: push %reg */
save_reg(cfi, op->src.reg, CFI_BP, -cfi->stack_size);
}
} else {
/* push %reg */
save_reg(cfi, op->src.reg, CFI_CFA, -cfi->stack_size);
}
/* detect when asm code uses rbp as a scratch register */
if (opts.stackval && insn_func(insn) && op->src.reg == CFI_BP &&
cfa->base != CFI_BP)
cfi->bp_scratch = true;
break;
case OP_DEST_REG_INDIRECT:
if (cfi->drap) {
if (op->src.reg == cfa->base && op->src.reg == cfi->drap_reg) {
/* drap: mov %drap, disp(%rbp) */
cfa->base = CFI_BP_INDIRECT;
cfa->offset = op->dest.offset;
/* save drap offset so we know when to restore it */
cfi->drap_offset = op->dest.offset;
} else {
/* drap: mov reg, disp(%rbp) */
save_reg(cfi, op->src.reg, CFI_BP, op->dest.offset);
}
} else if (op->dest.reg == cfa->base) {
/* mov reg, disp(%rbp) */
/* mov reg, disp(%rsp) */
save_reg(cfi, op->src.reg, CFI_CFA,
op->dest.offset - cfi->cfa.offset);
} else if (op->dest.reg == CFI_SP) {
/* mov reg, disp(%rsp) */
save_reg(cfi, op->src.reg, CFI_CFA,
op->dest.offset - cfi->stack_size);
} else if (op->src.reg == CFI_SP && op->dest.offset == 0) {
/* mov %rsp, (%reg); # setup a stack swizzle. */
cfi->vals[op->dest.reg].base = CFI_SP_INDIRECT;
cfi->vals[op->dest.reg].offset = cfa->offset;
}
break;
case OP_DEST_MEM:
if (op->src.type != OP_SRC_POP && op->src.type != OP_SRC_POPF) {
WARN_INSN(insn, "unknown stack-related memory operation");
return -1;
}
/* pop mem */
cfi->stack_size -= 8;
if (cfa->base == CFI_SP)
cfa->offset -= 8;
break;
default:
WARN_INSN(insn, "unknown stack-related instruction");
return -1;
}
return 0;
}
/*
 * The stack layouts of alternative instructions can sometimes diverge when
* they have stack modifications. That's fine as long as the potential stack
* layouts don't conflict at any given potential instruction boundary.
*
* Flatten the CFIs of the different alternative code streams (both original
* and replacement) into a single shared CFI array which can be used to detect
* conflicts and nicely feed a linear array of ORC entries to the unwinder.
*/
static int propagate_alt_cfi(struct objtool_file *file, struct instruction *insn)
{
struct cfi_state **alt_cfi;
int group_off;
if (!insn->alt_group)
return 0;
if (!insn->cfi) {
WARN("CFI missing");
return -1;
}
alt_cfi = insn->alt_group->cfi;
group_off = insn->offset - insn->alt_group->first_insn->offset;
if (!alt_cfi[group_off]) {
alt_cfi[group_off] = insn->cfi;
} else {
if (cficmp(alt_cfi[group_off], insn->cfi)) {
struct alt_group *orig_group = insn->alt_group->orig_group ?: insn->alt_group;
struct instruction *orig = orig_group->first_insn;
char *where = offstr(insn->sec, insn->offset);
WARN_INSN(orig, "stack layout conflict in alternatives: %s", where);
free(where);
return -1;
}
}
return 0;
}
static int handle_insn_ops(struct instruction *insn,
struct instruction *next_insn,
struct insn_state *state)
{
struct stack_op *op;
for (op = insn->stack_ops; op; op = op->next) {
if (update_cfi_state(insn, next_insn, &state->cfi, op))
return 1;
if (!insn->alt_group)
continue;
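		/*
		 * state->uaccess_stack is a bit-stack seeded with a sentinel
		 * bit: the first PUSHF sets it to 1, each PUSHF then shifts
		 * left and records the current uaccess state in bit 0, and
		 * each POPF pops bit 0 back into uaccess.  E.g.
		 * (illustrative), with uaccess=1:
		 *
		 *	0 -> PUSHF -> 0b11 -> POPF -> 0 (uaccess=1)
		 */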
if (op->dest.type == OP_DEST_PUSHF) {
if (!state->uaccess_stack) {
state->uaccess_stack = 1;
} else if (state->uaccess_stack >> 31) {
WARN_INSN(insn, "PUSHF stack exhausted");
return 1;
}
state->uaccess_stack <<= 1;
state->uaccess_stack |= state->uaccess;
}
if (op->src.type == OP_SRC_POPF) {
if (state->uaccess_stack) {
state->uaccess = state->uaccess_stack & 1;
state->uaccess_stack >>= 1;
if (state->uaccess_stack == 1)
state->uaccess_stack = 0;
}
}
}
return 0;
}
static bool insn_cfi_match(struct instruction *insn, struct cfi_state *cfi2)
{
struct cfi_state *cfi1 = insn->cfi;
int i;
if (!cfi1) {
WARN("CFI missing");
return false;
}
if (memcmp(&cfi1->cfa, &cfi2->cfa, sizeof(cfi1->cfa))) {
WARN_INSN(insn, "stack state mismatch: cfa1=%d%+d cfa2=%d%+d",
cfi1->cfa.base, cfi1->cfa.offset,
cfi2->cfa.base, cfi2->cfa.offset);
} else if (memcmp(&cfi1->regs, &cfi2->regs, sizeof(cfi1->regs))) {
for (i = 0; i < CFI_NUM_REGS; i++) {
if (!memcmp(&cfi1->regs[i], &cfi2->regs[i],
sizeof(struct cfi_reg)))
continue;
WARN_INSN(insn, "stack state mismatch: reg1[%d]=%d%+d reg2[%d]=%d%+d",
i, cfi1->regs[i].base, cfi1->regs[i].offset,
i, cfi2->regs[i].base, cfi2->regs[i].offset);
break;
}
} else if (cfi1->type != cfi2->type) {
WARN_INSN(insn, "stack state mismatch: type1=%d type2=%d",
cfi1->type, cfi2->type);
} else if (cfi1->drap != cfi2->drap ||
(cfi1->drap && cfi1->drap_reg != cfi2->drap_reg) ||
(cfi1->drap && cfi1->drap_offset != cfi2->drap_offset)) {
WARN_INSN(insn, "stack state mismatch: drap1=%d(%d,%d) drap2=%d(%d,%d)",
cfi1->drap, cfi1->drap_reg, cfi1->drap_offset,
cfi2->drap, cfi2->drap_reg, cfi2->drap_offset);
} else
return true;
return false;
}
static inline bool func_uaccess_safe(struct symbol *func)
{
if (func)
return func->uaccess_safe;
return false;
}
static inline const char *call_dest_name(struct instruction *insn)
{
static char pvname[19];
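	/* 19 == strlen("pv_ops[") + 10 digits + ']' + NUL */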
struct reloc *reloc;
int idx;
if (insn_call_dest(insn))
return insn_call_dest(insn)->name;
reloc = insn_reloc(NULL, insn);
if (reloc && !strcmp(reloc->sym->name, "pv_ops")) {
idx = (reloc_addend(reloc) / sizeof(void *));
snprintf(pvname, sizeof(pvname), "pv_ops[%d]", idx);
return pvname;
}
return "{dynamic}";
}
static bool pv_call_dest(struct objtool_file *file, struct instruction *insn)
{
struct symbol *target;
struct reloc *reloc;
int idx;
reloc = insn_reloc(file, insn);
if (!reloc || strcmp(reloc->sym->name, "pv_ops"))
return false;
idx = (arch_dest_reloc_offset(reloc_addend(reloc)) / sizeof(void *));
if (file->pv_ops[idx].clean)
return true;
file->pv_ops[idx].clean = true;
list_for_each_entry(target, &file->pv_ops[idx].targets, pv_target) {
if (!target->sec->noinstr) {
WARN("pv_ops[%d]: %s", idx, target->name);
file->pv_ops[idx].clean = false;
}
}
return file->pv_ops[idx].clean;
}
static inline bool noinstr_call_dest(struct objtool_file *file,
struct instruction *insn,
struct symbol *func)
{
/*
* We can't deal with indirect function calls at present;
* assume they're instrumented.
*/
if (!func) {
if (file->pv_ops)
return pv_call_dest(file, insn);
return false;
}
/*
	 * If the symbol is from a noinstr section, we're good.
*/
if (func->sec->noinstr)
return true;
/*
* If the symbol is a static_call trampoline, we can't tell.
*/
if (func->static_call_tramp)
return true;
/*
* The __ubsan_handle_*() calls are like WARN(), they only happen when
* something 'BAD' happened. At the risk of taking the machine down,
* let them proceed to get the message out.
*/
if (!strncmp(func->name, "__ubsan_handle_", 15))
return true;
return false;
}
static int validate_call(struct objtool_file *file,
struct instruction *insn,
struct insn_state *state)
{
if (state->noinstr && state->instr <= 0 &&
!noinstr_call_dest(file, insn, insn_call_dest(insn))) {
WARN_INSN(insn, "call to %s() leaves .noinstr.text section", call_dest_name(insn));
return 1;
}
if (state->uaccess && !func_uaccess_safe(insn_call_dest(insn))) {
WARN_INSN(insn, "call to %s() with UACCESS enabled", call_dest_name(insn));
return 1;
}
if (state->df) {
WARN_INSN(insn, "call to %s() with DF set", call_dest_name(insn));
return 1;
}
return 0;
}
static int validate_sibling_call(struct objtool_file *file,
struct instruction *insn,
struct insn_state *state)
{
if (insn_func(insn) && has_modified_stack_frame(insn, state)) {
WARN_INSN(insn, "sibling call from callable instruction with modified stack frame");
return 1;
}
return validate_call(file, insn, state);
}
static int validate_return(struct symbol *func, struct instruction *insn, struct insn_state *state)
{
if (state->noinstr && state->instr > 0) {
WARN_INSN(insn, "return with instrumentation enabled");
return 1;
}
if (state->uaccess && !func_uaccess_safe(func)) {
WARN_INSN(insn, "return with UACCESS enabled");
return 1;
}
if (!state->uaccess && func_uaccess_safe(func)) {
WARN_INSN(insn, "return with UACCESS disabled from a UACCESS-safe function");
return 1;
}
if (state->df) {
WARN_INSN(insn, "return with DF set");
return 1;
}
if (func && has_modified_stack_frame(insn, state)) {
WARN_INSN(insn, "return with modified stack frame");
return 1;
}
if (state->cfi.bp_scratch) {
WARN_INSN(insn, "BP used as a scratch register");
return 1;
}
return 0;
}
static struct instruction *next_insn_to_validate(struct objtool_file *file,
struct instruction *insn)
{
struct alt_group *alt_group = insn->alt_group;
/*
* Simulate the fact that alternatives are patched in-place. When the
* end of a replacement alt_group is reached, redirect objtool flow to
* the end of the original alt_group.
*
* insn->alts->insn -> alt_group->first_insn
* ...
* alt_group->last_insn
* [alt_group->nop] -> next(orig_group->last_insn)
*/
if (alt_group) {
if (alt_group->nop) {
/* ->nop implies ->orig_group */
if (insn == alt_group->last_insn)
return alt_group->nop;
if (insn == alt_group->nop)
goto next_orig;
}
if (insn == alt_group->last_insn && alt_group->orig_group)
goto next_orig;
}
return next_insn_same_sec(file, insn);
next_orig:
return next_insn_same_sec(file, alt_group->orig_group->last_insn);
}
/*
* Follow the branch starting at the given instruction, and recursively follow
* any other branches (jumps). Meanwhile, track the frame pointer state at
* each instruction and validate all the rules described in
* tools/objtool/Documentation/objtool.txt.
*/
static int validate_branch(struct objtool_file *file, struct symbol *func,
struct instruction *insn, struct insn_state state)
{
struct alternative *alt;
struct instruction *next_insn, *prev_insn = NULL;
struct section *sec;
u8 visited;
int ret;
sec = insn->sec;
while (1) {
next_insn = next_insn_to_validate(file, insn);
if (func && insn_func(insn) && func != insn_func(insn)->pfunc) {
/* Ignore KCFI type preambles, which always fall through */
if (!strncmp(func->name, "__cfi_", 6) ||
!strncmp(func->name, "__pfx_", 6))
return 0;
WARN("%s() falls through to next function %s()",
func->name, insn_func(insn)->name);
return 1;
}
if (func && insn->ignore) {
WARN_INSN(insn, "BUG: why am I validating an ignored function?");
return 1;
}
visited = VISITED_BRANCH << state.uaccess;
if (insn->visited & VISITED_BRANCH_MASK) {
if (!insn->hint && !insn_cfi_match(insn, &state.cfi))
return 1;
if (insn->visited & visited)
return 0;
} else {
nr_insns_visited++;
}
if (state.noinstr)
state.instr += insn->instr;
if (insn->hint) {
if (insn->restore) {
struct instruction *save_insn, *i;
i = insn;
save_insn = NULL;
sym_for_each_insn_continue_reverse(file, func, i) {
if (i->save) {
save_insn = i;
break;
}
}
if (!save_insn) {
WARN_INSN(insn, "no corresponding CFI save for CFI restore");
return 1;
}
if (!save_insn->visited) {
WARN_INSN(insn, "objtool isn't smart enough to handle this CFI save/restore combo");
return 1;
}
insn->cfi = save_insn->cfi;
nr_cfi_reused++;
}
state.cfi = *insn->cfi;
} else {
/* XXX track if we actually changed state.cfi */
if (prev_insn && !cficmp(prev_insn->cfi, &state.cfi)) {
insn->cfi = prev_insn->cfi;
nr_cfi_reused++;
} else {
insn->cfi = cfi_hash_find_or_add(&state.cfi);
}
}
insn->visited |= visited;
if (propagate_alt_cfi(file, insn))
return 1;
if (!insn->ignore_alts && insn->alts) {
bool skip_orig = false;
for (alt = insn->alts; alt; alt = alt->next) {
if (alt->skip_orig)
skip_orig = true;
ret = validate_branch(file, func, alt->insn, state);
if (ret) {
BT_INSN(insn, "(alt)");
return ret;
}
}
if (skip_orig)
return 0;
}
if (handle_insn_ops(insn, next_insn, &state))
return 1;
switch (insn->type) {
case INSN_RETURN:
return validate_return(func, insn, &state);
case INSN_CALL:
case INSN_CALL_DYNAMIC:
ret = validate_call(file, insn, &state);
if (ret)
return ret;
if (opts.stackval && func && !is_special_call(insn) &&
!has_valid_stack_frame(&state)) {
WARN_INSN(insn, "call without frame pointer save/setup");
return 1;
}
if (insn->dead_end)
return 0;
break;
case INSN_JUMP_CONDITIONAL:
case INSN_JUMP_UNCONDITIONAL:
if (is_sibling_call(insn)) {
ret = validate_sibling_call(file, insn, &state);
if (ret)
return ret;
} else if (insn->jump_dest) {
ret = validate_branch(file, func,
insn->jump_dest, state);
if (ret) {
BT_INSN(insn, "(branch)");
return ret;
}
}
if (insn->type == INSN_JUMP_UNCONDITIONAL)
return 0;
break;
case INSN_JUMP_DYNAMIC:
case INSN_JUMP_DYNAMIC_CONDITIONAL:
if (is_sibling_call(insn)) {
ret = validate_sibling_call(file, insn, &state);
if (ret)
return ret;
}
if (insn->type == INSN_JUMP_DYNAMIC)
return 0;
break;
case INSN_CONTEXT_SWITCH:
if (func && (!next_insn || !next_insn->hint)) {
WARN_INSN(insn, "unsupported instruction in callable function");
return 1;
}
return 0;
case INSN_STAC:
if (state.uaccess) {
WARN_INSN(insn, "recursive UACCESS enable");
return 1;
}
state.uaccess = true;
break;
case INSN_CLAC:
if (!state.uaccess && func) {
WARN_INSN(insn, "redundant UACCESS disable");
return 1;
}
if (func_uaccess_safe(func) && !state.uaccess_stack) {
WARN_INSN(insn, "UACCESS-safe disables UACCESS");
return 1;
}
state.uaccess = false;
break;
case INSN_STD:
if (state.df) {
WARN_INSN(insn, "recursive STD");
return 1;
}
state.df = true;
break;
case INSN_CLD:
if (!state.df && func) {
WARN_INSN(insn, "redundant CLD");
return 1;
}
state.df = false;
break;
default:
break;
}
if (insn->dead_end)
return 0;
if (!next_insn) {
if (state.cfi.cfa.base == CFI_UNDEFINED)
return 0;
WARN("%s: unexpected end of section", sec->name);
return 1;
}
prev_insn = insn;
insn = next_insn;
}
return 0;
}
static int validate_unwind_hint(struct objtool_file *file,
struct instruction *insn,
struct insn_state *state)
{
if (insn->hint && !insn->visited && !insn->ignore) {
int ret = validate_branch(file, insn_func(insn), insn, *state);
if (ret)
BT_INSN(insn, "<=== (hint)");
return ret;
}
return 0;
}
static int validate_unwind_hints(struct objtool_file *file, struct section *sec)
{
struct instruction *insn;
struct insn_state state;
int warnings = 0;
if (!file->hints)
return 0;
init_insn_state(file, &state, sec);
if (sec) {
sec_for_each_insn(file, sec, insn)
warnings += validate_unwind_hint(file, insn, &state);
} else {
for_each_insn(file, insn)
warnings += validate_unwind_hint(file, insn, &state);
}
return warnings;
}
/*
* Validate rethunk entry constraint: must untrain RET before the first RET.
*
* Follow every branch (intra-function) and ensure VALIDATE_UNRET_END comes
* before an actual RET instruction.
*/
static int validate_unret(struct objtool_file *file, struct instruction *insn)
{
struct instruction *next, *dest;
int ret;
for (;;) {
next = next_insn_to_validate(file, insn);
if (insn->visited & VISITED_UNRET)
return 0;
insn->visited |= VISITED_UNRET;
if (!insn->ignore_alts && insn->alts) {
struct alternative *alt;
bool skip_orig = false;
for (alt = insn->alts; alt; alt = alt->next) {
if (alt->skip_orig)
skip_orig = true;
ret = validate_unret(file, alt->insn);
if (ret) {
BT_INSN(insn, "(alt)");
return ret;
}
}
if (skip_orig)
return 0;
}
switch (insn->type) {
case INSN_CALL_DYNAMIC:
case INSN_JUMP_DYNAMIC:
case INSN_JUMP_DYNAMIC_CONDITIONAL:
WARN_INSN(insn, "early indirect call");
return 1;
case INSN_JUMP_UNCONDITIONAL:
case INSN_JUMP_CONDITIONAL:
if (!is_sibling_call(insn)) {
if (!insn->jump_dest) {
WARN_INSN(insn, "unresolved jump target after linking?!?");
return -1;
}
ret = validate_unret(file, insn->jump_dest);
if (ret) {
BT_INSN(insn, "(branch%s)",
insn->type == INSN_JUMP_CONDITIONAL ? "-cond" : "");
return ret;
}
if (insn->type == INSN_JUMP_UNCONDITIONAL)
return 0;
break;
}
/* fallthrough */
case INSN_CALL:
dest = find_insn(file, insn_call_dest(insn)->sec,
insn_call_dest(insn)->offset);
if (!dest) {
WARN("Unresolved function after linking!?: %s",
insn_call_dest(insn)->name);
return -1;
}
ret = validate_unret(file, dest);
if (ret) {
BT_INSN(insn, "(call)");
return ret;
}
/*
* If a call returns without error, it must have seen UNTRAIN_RET.
* Therefore any non-error return is a success.
*/
return 0;
case INSN_RETURN:
WARN_INSN(insn, "RET before UNTRAIN");
return 1;
case INSN_NOP:
if (insn->retpoline_safe)
return 0;
break;
default:
break;
}
if (!next) {
WARN_INSN(insn, "teh end!");
return -1;
}
insn = next;
}
return 0;
}
/*
* Validate that all branches starting at VALIDATE_UNRET_BEGIN encounter
* VALIDATE_UNRET_END before RET.
*/
static int validate_unrets(struct objtool_file *file)
{
struct instruction *insn;
int ret, warnings = 0;
for_each_insn(file, insn) {
if (!insn->unret)
continue;
ret = validate_unret(file, insn);
if (ret < 0) {
WARN_INSN(insn, "Failed UNRET validation");
return ret;
}
warnings += ret;
}
return warnings;
}
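/*
 * In retpoline/rethunk builds, indirect jumps/calls and 'naked' returns are
 * only allowed in .init text or when explicitly annotated retpoline-safe.
 */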
static int validate_retpoline(struct objtool_file *file)
{
struct instruction *insn;
int warnings = 0;
for_each_insn(file, insn) {
if (insn->type != INSN_JUMP_DYNAMIC &&
insn->type != INSN_CALL_DYNAMIC &&
insn->type != INSN_RETURN)
continue;
if (insn->retpoline_safe)
continue;
if (insn->sec->init)
continue;
if (insn->type == INSN_RETURN) {
if (opts.rethunk) {
WARN_INSN(insn, "'naked' return found in RETHUNK build");
} else
continue;
} else {
WARN_INSN(insn, "indirect %s found in RETPOLINE build",
insn->type == INSN_JUMP_DYNAMIC ? "jump" : "call");
}
warnings++;
}
return warnings;
}
static bool is_kasan_insn(struct instruction *insn)
{
return (insn->type == INSN_CALL &&
!strcmp(insn_call_dest(insn)->name, "__asan_handle_no_return"));
}
static bool is_ubsan_insn(struct instruction *insn)
{
return (insn->type == INSN_CALL &&
!strcmp(insn_call_dest(insn)->name,
"__ubsan_handle_builtin_unreachable"));
}
static bool ignore_unreachable_insn(struct objtool_file *file, struct instruction *insn)
{
int i;
struct instruction *prev_insn;
if (insn->ignore || insn->type == INSN_NOP || insn->type == INSN_TRAP)
return true;
/*
* Ignore alternative replacement instructions. This can happen
* when a whitelisted function uses one of the ALTERNATIVE macros.
*/
if (!strcmp(insn->sec->name, ".altinstr_replacement") ||
!strcmp(insn->sec->name, ".altinstr_aux"))
return true;
/*
* Whole archive runs might encounter dead code from weak symbols.
* This is where the linker will have dropped the weak symbol in
* favour of a regular symbol, but leaves the code in place.
*
* In this case we'll find a piece of code (whole function) that is not
* covered by a !section symbol. Ignore them.
*/
if (opts.link && !insn_func(insn)) {
int size = find_symbol_hole_containing(insn->sec, insn->offset);
unsigned long end = insn->offset + size;
if (!size) /* not a hole */
return false;
if (size < 0) /* hole until the end */
return true;
sec_for_each_insn_continue(file, insn) {
/*
* If we reach a visited instruction at or before the
* end of the hole, ignore the unreachable.
*/
if (insn->visited)
return true;
if (insn->offset >= end)
break;
/*
* If this hole jumps to a .cold function, mark it ignore too.
*/
if (insn->jump_dest && insn_func(insn->jump_dest) &&
strstr(insn_func(insn->jump_dest)->name, ".cold")) {
struct instruction *dest = insn->jump_dest;
func_for_each_insn(file, insn_func(dest), dest)
dest->ignore = true;
}
}
return false;
}
if (!insn_func(insn))
return false;
if (insn_func(insn)->static_call_tramp)
return true;
/*
* CONFIG_UBSAN_TRAP inserts a UD2 when it sees
* __builtin_unreachable(). The BUG() macro has an unreachable() after
* the UD2, which causes GCC's undefined trap logic to emit another UD2
* (or occasionally a JMP to UD2).
*
* It may also insert a UD2 after calling a __noreturn function.
*/
prev_insn = prev_insn_same_sec(file, insn);
if (prev_insn->dead_end &&
(insn->type == INSN_BUG ||
(insn->type == INSN_JUMP_UNCONDITIONAL &&
insn->jump_dest && insn->jump_dest->type == INSN_BUG)))
return true;
/*
* Check if this (or a subsequent) instruction is related to
* CONFIG_UBSAN or CONFIG_KASAN.
*
* End the search at 5 instructions to avoid going into the weeds.
*/
for (i = 0; i < 5; i++) {
if (is_kasan_insn(insn) || is_ubsan_insn(insn))
return true;
if (insn->type == INSN_JUMP_UNCONDITIONAL) {
if (insn->jump_dest &&
insn_func(insn->jump_dest) == insn_func(insn)) {
insn = insn->jump_dest;
continue;
}
break;
}
if (insn->offset + insn->len >= insn_func(insn)->offset + insn_func(insn)->len)
break;
insn = next_insn_same_sec(file, insn);
}
return false;
}
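/*
 * Create a prefix symbol (__pfx_<func>) covering the opts.prefix bytes of NOP
 * padding in front of <func>, and propagate the function's entry CFI back to
 * those padding bytes.
 */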
static int add_prefix_symbol(struct objtool_file *file, struct symbol *func)
{
struct instruction *insn, *prev;
struct cfi_state *cfi;
insn = find_insn(file, func->sec, func->offset);
if (!insn)
return -1;
for (prev = prev_insn_same_sec(file, insn);
prev;
prev = prev_insn_same_sec(file, prev)) {
u64 offset;
if (prev->type != INSN_NOP)
return -1;
offset = func->offset - prev->offset;
if (offset > opts.prefix)
return -1;
if (offset < opts.prefix)
continue;
elf_create_prefix_symbol(file->elf, func, opts.prefix);
break;
}
if (!prev)
return -1;
if (!insn->cfi) {
/*
* This can happen if stack validation isn't enabled or the
* function is annotated with STACK_FRAME_NON_STANDARD.
*/
return 0;
}
/* Propagate insn->cfi to the prefix code */
cfi = cfi_hash_find_or_add(insn->cfi);
for (; prev != insn; prev = next_insn_same_sec(file, prev))
prev->cfi = cfi;
return 0;
}
static int add_prefix_symbols(struct objtool_file *file)
{
struct section *sec;
struct symbol *func;
for_each_sec(file, sec) {
if (!(sec->sh.sh_flags & SHF_EXECINSTR))
continue;
sec_for_each_sym(sec, func) {
if (func->type != STT_FUNC)
continue;
add_prefix_symbol(file, func);
}
}
return 0;
}
static int validate_symbol(struct objtool_file *file, struct section *sec,
struct symbol *sym, struct insn_state *state)
{
struct instruction *insn;
int ret;
if (!sym->len) {
WARN("%s() is missing an ELF size annotation", sym->name);
return 1;
}
if (sym->pfunc != sym || sym->alias != sym)
return 0;
insn = find_insn(file, sec, sym->offset);
if (!insn || insn->ignore || insn->visited)
return 0;
state->uaccess = sym->uaccess_safe;
ret = validate_branch(file, insn_func(insn), insn, *state);
if (ret)
BT_INSN(insn, "<=== (sym)");
return ret;
}
static int validate_section(struct objtool_file *file, struct section *sec)
{
struct insn_state state;
struct symbol *func;
int warnings = 0;
sec_for_each_sym(sec, func) {
if (func->type != STT_FUNC)
continue;
init_insn_state(file, &state, sec);
set_func_state(&state.cfi);
warnings += validate_symbol(file, sec, func, &state);
}
return warnings;
}
static int validate_noinstr_sections(struct objtool_file *file)
{
struct section *sec;
int warnings = 0;
sec = find_section_by_name(file->elf, ".noinstr.text");
if (sec) {
warnings += validate_section(file, sec);
warnings += validate_unwind_hints(file, sec);
}
sec = find_section_by_name(file->elf, ".entry.text");
if (sec) {
warnings += validate_section(file, sec);
warnings += validate_unwind_hints(file, sec);
}
sec = find_section_by_name(file->elf, ".cpuidle.text");
if (sec) {
warnings += validate_section(file, sec);
warnings += validate_unwind_hints(file, sec);
}
return warnings;
}
static int validate_functions(struct objtool_file *file)
{
struct section *sec;
int warnings = 0;
for_each_sec(file, sec) {
if (!(sec->sh.sh_flags & SHF_EXECINSTR))
continue;
warnings += validate_section(file, sec);
}
return warnings;
}
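/*
 * Reachable ENDBRs are dropped from the seal list (linked through
 * insn->call_node); whatever remains on the list gets sealed (replaced with
 * NOPs) by create_ibt_endbr_seal_sections().
 */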
static void mark_endbr_used(struct instruction *insn)
{
if (!list_empty(&insn->call_node))
list_del_init(&insn->call_node);
}
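/*
 * Accept a reference to the first byte past a symbol whose start is ENDBR or
 * ANNOTATE_NOENDBR -- the typical pattern for begin/end labels bracketing a
 * code range.
 */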
static bool noendbr_range(struct objtool_file *file, struct instruction *insn)
{
struct symbol *sym = find_symbol_containing(insn->sec, insn->offset-1);
struct instruction *first;
if (!sym)
return false;
first = find_insn(file, sym->sec, sym->offset);
if (!first)
return false;
if (first->type != INSN_ENDBR && !first->noendbr)
return false;
return insn->offset == sym->offset + sym->len;
}
static int validate_ibt_insn(struct objtool_file *file, struct instruction *insn)
{
struct instruction *dest;
struct reloc *reloc;
unsigned long off;
int warnings = 0;
/*
* Looking for function pointer load relocations. Ignore
* direct/indirect branches:
*/
switch (insn->type) {
case INSN_CALL:
case INSN_CALL_DYNAMIC:
case INSN_JUMP_CONDITIONAL:
case INSN_JUMP_UNCONDITIONAL:
case INSN_JUMP_DYNAMIC:
case INSN_JUMP_DYNAMIC_CONDITIONAL:
case INSN_RETURN:
case INSN_NOP:
return 0;
default:
break;
}
for (reloc = insn_reloc(file, insn);
reloc;
reloc = find_reloc_by_dest_range(file->elf, insn->sec,
reloc_offset(reloc) + 1,
(insn->offset + insn->len) - (reloc_offset(reloc) + 1))) {
/*
* static_call_update() references the trampoline, which
* doesn't have (or need) ENDBR. Skip warning in that case.
*/
if (reloc->sym->static_call_tramp)
continue;
off = reloc->sym->offset;
if (reloc_type(reloc) == R_X86_64_PC32 ||
reloc_type(reloc) == R_X86_64_PLT32)
off += arch_dest_reloc_offset(reloc_addend(reloc));
else
off += reloc_addend(reloc);
dest = find_insn(file, reloc->sym->sec, off);
if (!dest)
continue;
if (dest->type == INSN_ENDBR) {
mark_endbr_used(dest);
continue;
}
if (insn_func(dest) && insn_func(insn) &&
insn_func(dest)->pfunc == insn_func(insn)->pfunc) {
/*
* Anything from->to self is either _THIS_IP_ or
* IRET-to-self.
*
* There is no sane way to annotate _THIS_IP_ since the
* compiler treats the relocation as a constant and is
* happy to fold in offsets, skewing any annotation we
* do, leading to vast amounts of false-positives.
*
* There's also compiler generated _THIS_IP_ through
* KCOV and such which we have no hope of annotating.
*
* As such, blanket accept self-references without
* issue.
*/
continue;
}
/*
* Accept anything ANNOTATE_NOENDBR.
*/
if (dest->noendbr)
continue;
/*
* Accept if this is the instruction after a symbol
* that is (no)endbr -- typical code-range usage.
*/
if (noendbr_range(file, dest))
continue;
WARN_INSN(insn, "relocation to !ENDBR: %s", offstr(dest->sec, dest->offset));
warnings++;
}
return warnings;
}
static int validate_ibt_data_reloc(struct objtool_file *file,
struct reloc *reloc)
{
struct instruction *dest;
dest = find_insn(file, reloc->sym->sec,
reloc->sym->offset + reloc_addend(reloc));
if (!dest)
return 0;
if (dest->type == INSN_ENDBR) {
mark_endbr_used(dest);
return 0;
}
if (dest->noendbr)
return 0;
WARN_FUNC("data relocation to !ENDBR: %s",
reloc->sec->base, reloc_offset(reloc),
offstr(dest->sec, dest->offset));
return 1;
}
/*
* Validate IBT rules and remove used ENDBR instructions from the seal list.
* Unused ENDBR instructions will be annotated for sealing (i.e., replaced with
* NOPs) later, in create_ibt_endbr_seal_sections().
*/
static int validate_ibt(struct objtool_file *file)
{
struct section *sec;
struct reloc *reloc;
struct instruction *insn;
int warnings = 0;
for_each_insn(file, insn)
warnings += validate_ibt_insn(file, insn);
for_each_sec(file, sec) {
/* Already done by validate_ibt_insn() */
if (sec->sh.sh_flags & SHF_EXECINSTR)
continue;
if (!sec->rsec)
continue;
/*
* These sections can reference text addresses, but not with
* the intent to indirect branch to them.
*/
if ((!strncmp(sec->name, ".discard", 8) &&
strcmp(sec->name, ".discard.ibt_endbr_noseal")) ||
!strncmp(sec->name, ".debug", 6) ||
!strcmp(sec->name, ".altinstructions") ||
!strcmp(sec->name, ".ibt_endbr_seal") ||
!strcmp(sec->name, ".orc_unwind_ip") ||
!strcmp(sec->name, ".parainstructions") ||
!strcmp(sec->name, ".retpoline_sites") ||
!strcmp(sec->name, ".smp_locks") ||
!strcmp(sec->name, ".static_call_sites") ||
!strcmp(sec->name, "_error_injection_whitelist") ||
!strcmp(sec->name, "_kprobe_blacklist") ||
!strcmp(sec->name, "__bug_table") ||
!strcmp(sec->name, "__ex_table") ||
!strcmp(sec->name, "__jump_table") ||
!strcmp(sec->name, "__mcount_loc") ||
!strcmp(sec->name, ".kcfi_traps") ||
strstr(sec->name, "__patchable_function_entries"))
continue;
for_each_reloc(sec->rsec, reloc)
warnings += validate_ibt_data_reloc(file, reloc);
}
return warnings;
}
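/*
 * Straight-Line-Speculation mitigation: every RET and indirect jump must be
 * followed by an INT3 speculation trap, unless annotated retpoline-safe.
 */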
static int validate_sls(struct objtool_file *file)
{
struct instruction *insn, *next_insn;
int warnings = 0;
for_each_insn(file, insn) {
next_insn = next_insn_same_sec(file, insn);
if (insn->retpoline_safe)
continue;
switch (insn->type) {
case INSN_RETURN:
if (!next_insn || next_insn->type != INSN_TRAP) {
WARN_INSN(insn, "missing int3 after ret");
warnings++;
}
break;
case INSN_JUMP_DYNAMIC:
if (!next_insn || next_insn->type != INSN_TRAP) {
WARN_INSN(insn, "missing int3 after indirect jump");
warnings++;
}
break;
default:
break;
}
}
return warnings;
}
static bool ignore_noreturn_call(struct instruction *insn)
{
struct symbol *call_dest = insn_call_dest(insn);
/*
* FIXME: hack, we need a real noreturn solution
*
* Problem is, exc_double_fault() may or may not return, depending on
* whether CONFIG_X86_ESPFIX64 is set. But objtool has no visibility
* to the kernel config.
*
* Other potential ways to fix it:
*
* - have compiler communicate __noreturn functions somehow
* - remove CONFIG_X86_ESPFIX64
* - read the .config file
* - add a cmdline option
* - create a generic objtool annotation format (vs a bunch of custom
* formats) and annotate it
*/
if (!strcmp(call_dest->name, "exc_double_fault")) {
/* prevent further unreachable warnings for the caller */
insn->sym->warned = 1;
return true;
}
return false;
}
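/*
 * Anything not reached by validate_branch() is either dead code or sits after
 * a call to a function that is missing a __noreturn annotation.
 */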
static int validate_reachable_instructions(struct objtool_file *file)
{
struct instruction *insn, *prev_insn;
struct symbol *call_dest;
int warnings = 0;
if (file->ignore_unreachables)
return 0;
for_each_insn(file, insn) {
if (insn->visited || ignore_unreachable_insn(file, insn))
continue;
prev_insn = prev_insn_same_sec(file, insn);
if (prev_insn && prev_insn->dead_end) {
call_dest = insn_call_dest(prev_insn);
if (call_dest && !ignore_noreturn_call(prev_insn)) {
WARN_INSN(insn, "%s() is missing a __noreturn annotation",
call_dest->name);
warnings++;
continue;
}
}
WARN_INSN(insn, "unreachable instruction");
warnings++;
}
return warnings;
}
/* 'funcs' is a space-separated list of function names */
static int disas_funcs(const char *funcs)
{
const char *objdump_str, *cross_compile;
int size, ret;
char *cmd;
cross_compile = getenv("CROSS_COMPILE");
objdump_str = "%sobjdump -wdr %s | gawk -M -v _funcs='%s' '"
"BEGIN { split(_funcs, funcs); }"
"/^$/ { func_match = 0; }"
"/<.*>:/ { "
"f = gensub(/.*<(.*)>:/, \"\\\\1\", 1);"
"for (i in funcs) {"
"if (funcs[i] == f) {"
"func_match = 1;"
"base = strtonum(\"0x\" $1);"
"break;"
"}"
"}"
"}"
"{"
"if (func_match) {"
"addr = strtonum(\"0x\" $1);"
"printf(\"%%04x \", addr - base);"
"print;"
"}"
"}' 1>&2";
/* fake snprintf() to calculate the size */
size = snprintf(NULL, 0, objdump_str, cross_compile, objname, funcs) + 1;
if (size <= 0) {
WARN("objdump string size calculation failed");
return -1;
}
	cmd = malloc(size);
	if (!cmd) {
		WARN("malloc failed");
		return -1;
	}

	/* real snprintf() */
snprintf(cmd, size, objdump_str, cross_compile, objname, funcs);
ret = system(cmd);
if (ret) {
WARN("disassembly failed: %d", ret);
return -1;
}
return 0;
}
static int disas_warned_funcs(struct objtool_file *file)
{
struct symbol *sym;
char *funcs = NULL, *tmp;
for_each_sym(file, sym) {
if (sym->warned) {
if (!funcs) {
funcs = malloc(strlen(sym->name) + 1);
strcpy(funcs, sym->name);
} else {
tmp = malloc(strlen(funcs) + strlen(sym->name) + 2);
sprintf(tmp, "%s %s", funcs, sym->name);
free(funcs);
funcs = tmp;
}
}
}
if (funcs)
disas_funcs(funcs);
return 0;
}
struct insn_chunk {
void *addr;
struct insn_chunk *next;
};
/*
* Reduce peak RSS usage by freeing insns memory before writing the ELF file,
* which can trigger more allocations for .debug_* sections whose data hasn't
* been read yet.
*/
static void free_insns(struct objtool_file *file)
{
struct instruction *insn;
struct insn_chunk *chunks = NULL, *chunk;
for_each_insn(file, insn) {
if (!insn->idx) {
chunk = malloc(sizeof(*chunk));
chunk->addr = insn;
chunk->next = chunks;
chunks = chunk;
}
}
for (chunk = chunks; chunk; chunk = chunk->next)
free(chunk->addr);
}
int check(struct objtool_file *file)
{
int ret, warnings = 0;
arch_initial_func_cfi_state(&initial_func_cfi);
init_cfi_state(&init_cfi);
init_cfi_state(&func_cfi);
set_func_state(&func_cfi);
init_cfi_state(&force_undefined_cfi);
force_undefined_cfi.force_undefined = true;
if (!cfi_hash_alloc(1UL << (file->elf->symbol_bits - 3)))
goto out;
cfi_hash_add(&init_cfi);
cfi_hash_add(&func_cfi);
ret = decode_sections(file);
if (ret < 0)
goto out;
warnings += ret;
if (!nr_insns)
goto out;
if (opts.retpoline) {
ret = validate_retpoline(file);
if (ret < 0)
return ret;
warnings += ret;
}
if (opts.stackval || opts.orc || opts.uaccess) {
ret = validate_functions(file);
if (ret < 0)
goto out;
warnings += ret;
ret = validate_unwind_hints(file, NULL);
if (ret < 0)
goto out;
warnings += ret;
if (!warnings) {
ret = validate_reachable_instructions(file);
if (ret < 0)
goto out;
warnings += ret;
}
} else if (opts.noinstr) {
ret = validate_noinstr_sections(file);
if (ret < 0)
goto out;
warnings += ret;
}
if (opts.unret) {
/*
* Must be after validate_branch() and friends, it plays
* further games with insn->visited.
*/
ret = validate_unrets(file);
if (ret < 0)
return ret;
warnings += ret;
}
if (opts.ibt) {
ret = validate_ibt(file);
if (ret < 0)
goto out;
warnings += ret;
}
if (opts.sls) {
ret = validate_sls(file);
if (ret < 0)
goto out;
warnings += ret;
}
if (opts.static_call) {
ret = create_static_call_sections(file);
if (ret < 0)
goto out;
warnings += ret;
}
if (opts.retpoline) {
ret = create_retpoline_sites_sections(file);
if (ret < 0)
goto out;
warnings += ret;
}
if (opts.cfi) {
ret = create_cfi_sections(file);
if (ret < 0)
goto out;
warnings += ret;
}
if (opts.rethunk) {
ret = create_return_sites_sections(file);
if (ret < 0)
goto out;
warnings += ret;
if (opts.hack_skylake) {
ret = create_direct_call_sections(file);
if (ret < 0)
goto out;
warnings += ret;
}
}
if (opts.mcount) {
ret = create_mcount_loc_sections(file);
if (ret < 0)
goto out;
warnings += ret;
}
if (opts.prefix) {
ret = add_prefix_symbols(file);
if (ret < 0)
return ret;
warnings += ret;
}
if (opts.ibt) {
ret = create_ibt_endbr_seal_sections(file);
if (ret < 0)
goto out;
warnings += ret;
}
if (opts.orc && nr_insns) {
ret = orc_create(file);
if (ret < 0)
goto out;
warnings += ret;
}
free_insns(file);
if (opts.verbose)
disas_warned_funcs(file);
if (opts.stats) {
printf("nr_insns_visited: %ld\n", nr_insns_visited);
printf("nr_cfi: %ld\n", nr_cfi);
printf("nr_cfi_reused: %ld\n", nr_cfi_reused);
printf("nr_cfi_cache: %ld\n", nr_cfi_cache);
}
out:
/*
* For now, don't fail the kernel build on fatal warnings. These
* errors are still fairly common due to the growing matrix of
* supported toolchains and their recent pace of change.
*/
return 0;
}
| linux-master | tools/objtool/check.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright (C) 2015-2017 Josh Poimboeuf <[email protected]>
*/
#include <subcmd/parse-options.h>
#include <string.h>
#include <stdlib.h>
#include <objtool/builtin.h>
#include <objtool/objtool.h>
#define ERROR(format, ...) \
fprintf(stderr, \
"error: objtool: " format "\n", \
##__VA_ARGS__)
struct opts opts;
static const char * const check_usage[] = {
"objtool <actions> [<options>] file.o",
NULL,
};
static const char * const env_usage[] = {
"OBJTOOL_ARGS=\"<options>\"",
NULL,
};
static int parse_dump(const struct option *opt, const char *str, int unset)
{
if (!str || !strcmp(str, "orc")) {
opts.dump_orc = true;
return 0;
}
return -1;
}
static int parse_hacks(const struct option *opt, const char *str, int unset)
{
bool found = false;
/*
* Use strstr() as a lazy method of checking for comma-separated
* options.
*
* No string provided == enable all options.
*/
if (!str || strstr(str, "jump_label")) {
opts.hack_jump_label = true;
found = true;
}
if (!str || strstr(str, "noinstr")) {
opts.hack_noinstr = true;
found = true;
}
if (!str || strstr(str, "skylake")) {
opts.hack_skylake = true;
found = true;
}
return found ? 0 : -1;
}
static const struct option check_options[] = {
OPT_GROUP("Actions:"),
OPT_CALLBACK_OPTARG('h', "hacks", NULL, NULL, "jump_label,noinstr,skylake", "patch toolchain bugs/limitations", parse_hacks),
OPT_BOOLEAN('i', "ibt", &opts.ibt, "validate and annotate IBT"),
OPT_BOOLEAN('m', "mcount", &opts.mcount, "annotate mcount/fentry calls for ftrace"),
OPT_BOOLEAN('n', "noinstr", &opts.noinstr, "validate noinstr rules"),
OPT_BOOLEAN('o', "orc", &opts.orc, "generate ORC metadata"),
OPT_BOOLEAN('r', "retpoline", &opts.retpoline, "validate and annotate retpoline usage"),
OPT_BOOLEAN(0, "rethunk", &opts.rethunk, "validate and annotate rethunk usage"),
OPT_BOOLEAN(0, "unret", &opts.unret, "validate entry unret placement"),
OPT_INTEGER(0, "prefix", &opts.prefix, "generate prefix symbols"),
OPT_BOOLEAN('l', "sls", &opts.sls, "validate straight-line-speculation mitigations"),
OPT_BOOLEAN('s', "stackval", &opts.stackval, "validate frame pointer rules"),
OPT_BOOLEAN('t', "static-call", &opts.static_call, "annotate static calls"),
OPT_BOOLEAN('u', "uaccess", &opts.uaccess, "validate uaccess rules for SMAP"),
OPT_BOOLEAN(0 , "cfi", &opts.cfi, "annotate kernel control flow integrity (kCFI) function preambles"),
OPT_CALLBACK_OPTARG(0, "dump", NULL, NULL, "orc", "dump metadata", parse_dump),
OPT_GROUP("Options:"),
OPT_BOOLEAN(0, "backtrace", &opts.backtrace, "unwind on error"),
OPT_BOOLEAN(0, "backup", &opts.backup, "create .orig files before modification"),
OPT_BOOLEAN(0, "dry-run", &opts.dryrun, "don't write modifications"),
OPT_BOOLEAN(0, "link", &opts.link, "object is a linked object"),
OPT_BOOLEAN(0, "module", &opts.module, "object is part of a kernel module"),
OPT_BOOLEAN(0, "mnop", &opts.mnop, "nop out mcount call sites"),
OPT_BOOLEAN(0, "no-unreachable", &opts.no_unreachable, "skip 'unreachable instruction' warnings"),
OPT_BOOLEAN(0, "sec-address", &opts.sec_address, "print section addresses in warnings"),
OPT_BOOLEAN(0, "stats", &opts.stats, "print statistics"),
OPT_BOOLEAN('v', "verbose", &opts.verbose, "verbose warnings"),
OPT_END(),
};
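/*
 * Options can also be injected through the environment, e.g. (a hypothetical
 * invocation; any of the options above are accepted):
 *
 *   OBJTOOL_ARGS="--backtrace --stats" make ...
 */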
int cmd_parse_options(int argc, const char **argv, const char * const usage[])
{
const char *envv[16] = { };
char *env;
int envc;
env = getenv("OBJTOOL_ARGS");
if (env) {
envv[0] = "OBJTOOL_ARGS";
for (envc = 1; envc < ARRAY_SIZE(envv); ) {
envv[envc++] = env;
env = strchr(env, ' ');
if (!env)
break;
*env = '\0';
env++;
}
parse_options(envc, envv, check_options, env_usage, 0);
}
env = getenv("OBJTOOL_VERBOSE");
if (env && !strcmp(env, "1"))
opts.verbose = true;
argc = parse_options(argc, argv, check_options, usage, 0);
if (argc != 1)
usage_with_options(usage, check_options);
return argc;
}
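/*
 * At least one action must be given; --dump can't be combined with the other
 * actions, and --unret depends on --rethunk.
 */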
static bool opts_valid(void)
{
if (opts.hack_jump_label ||
opts.hack_noinstr ||
opts.ibt ||
opts.mcount ||
opts.noinstr ||
opts.orc ||
opts.retpoline ||
opts.rethunk ||
opts.sls ||
opts.stackval ||
opts.static_call ||
opts.uaccess) {
if (opts.dump_orc) {
ERROR("--dump can't be combined with other options");
return false;
}
return true;
}
if (opts.unret && !opts.rethunk) {
ERROR("--unret requires --rethunk");
return false;
}
if (opts.dump_orc)
return true;
ERROR("At least one command required");
return false;
}
static bool mnop_opts_valid(void)
{
if (opts.mnop && !opts.mcount) {
ERROR("--mnop requires --mcount");
return false;
}
return true;
}
static bool link_opts_valid(struct objtool_file *file)
{
if (opts.link)
return true;
if (has_multiple_files(file->elf)) {
ERROR("Linked object detected, forcing --link");
opts.link = true;
return true;
}
if (opts.noinstr) {
ERROR("--noinstr requires --link");
return false;
}
if (opts.ibt) {
ERROR("--ibt requires --link");
return false;
}
if (opts.unret) {
ERROR("--unret requires --link");
return false;
}
return true;
}
int objtool_run(int argc, const char **argv)
{
const char *objname;
struct objtool_file *file;
int ret;
argc = cmd_parse_options(argc, argv, check_usage);
objname = argv[0];
if (!opts_valid())
return 1;
if (opts.dump_orc)
return orc_dump(objname);
file = objtool_open_read(objname);
if (!file)
return 1;
if (!mnop_opts_valid())
return 1;
if (!link_opts_valid(file))
return 1;
ret = check(file);
if (ret)
return ret;
if (file->elf->changed)
return elf_write(file->elf);
return 0;
}
| linux-master | tools/objtool/builtin-check.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright (C) 2015 Josh Poimboeuf <[email protected]>
*/
/*
* This file reads all the special sections which have alternate instructions
* which can be patched in or redirected to at runtime.
*/
#include <stdlib.h>
#include <string.h>
#include <arch/special.h>
#include <objtool/builtin.h>
#include <objtool/special.h>
#include <objtool/warn.h>
#include <objtool/endianness.h>
struct special_entry {
const char *sec;
bool group, jump_or_nop;
unsigned char size, orig, new;
unsigned char orig_len, new_len; /* group only */
unsigned char feature; /* ALTERNATIVE macro CPU feature */
unsigned char key; /* jump_label key */
};
static const struct special_entry entries[] = {
{
.sec = ".altinstructions",
.group = true,
.size = ALT_ENTRY_SIZE,
.orig = ALT_ORIG_OFFSET,
.orig_len = ALT_ORIG_LEN_OFFSET,
.new = ALT_NEW_OFFSET,
.new_len = ALT_NEW_LEN_OFFSET,
.feature = ALT_FEATURE_OFFSET,
},
{
.sec = "__jump_table",
.jump_or_nop = true,
.size = JUMP_ENTRY_SIZE,
.orig = JUMP_ORIG_OFFSET,
.new = JUMP_NEW_OFFSET,
.key = JUMP_KEY_OFFSET,
},
{
.sec = "__ex_table",
.size = EX_ENTRY_SIZE,
.orig = EX_ORIG_OFFSET,
.new = EX_NEW_OFFSET,
},
{},
};
void __weak arch_handle_alternative(unsigned short feature, struct special_alt *alt)
{
}
static void reloc_to_sec_off(struct reloc *reloc, struct section **sec,
unsigned long *off)
{
*sec = reloc->sym->sec;
*off = reloc->sym->offset + reloc_addend(reloc);
}
static int get_alt_entry(struct elf *elf, const struct special_entry *entry,
struct section *sec, int idx,
struct special_alt *alt)
{
struct reloc *orig_reloc, *new_reloc;
unsigned long offset;
offset = idx * entry->size;
alt->group = entry->group;
alt->jump_or_nop = entry->jump_or_nop;
if (alt->group) {
alt->orig_len = *(unsigned char *)(sec->data->d_buf + offset +
entry->orig_len);
alt->new_len = *(unsigned char *)(sec->data->d_buf + offset +
entry->new_len);
}
if (entry->feature) {
unsigned short feature;
feature = bswap_if_needed(elf,
*(unsigned short *)(sec->data->d_buf +
offset +
entry->feature));
arch_handle_alternative(feature, alt);
}
orig_reloc = find_reloc_by_dest(elf, sec, offset + entry->orig);
if (!orig_reloc) {
WARN_FUNC("can't find orig reloc", sec, offset + entry->orig);
return -1;
}
reloc_to_sec_off(orig_reloc, &alt->orig_sec, &alt->orig_off);
if (!entry->group || alt->new_len) {
new_reloc = find_reloc_by_dest(elf, sec, offset + entry->new);
if (!new_reloc) {
WARN_FUNC("can't find new reloc",
sec, offset + entry->new);
return -1;
}
reloc_to_sec_off(new_reloc, &alt->new_sec, &alt->new_off);
/* _ASM_EXTABLE_EX hack */
if (alt->new_off >= 0x7ffffff0)
alt->new_off -= 0x7ffffff0;
}
if (entry->key) {
struct reloc *key_reloc;
key_reloc = find_reloc_by_dest(elf, sec, offset + entry->key);
if (!key_reloc) {
WARN_FUNC("can't find key reloc",
sec, offset + entry->key);
return -1;
}
alt->key_addend = reloc_addend(key_reloc);
}
return 0;
}
/*
* Read all the special sections and create a list of special_alt structs which
* describe all the alternate instructions which can be patched in or
* redirected to at runtime.
*/
int special_get_alts(struct elf *elf, struct list_head *alts)
{
const struct special_entry *entry;
struct section *sec;
unsigned int nr_entries;
struct special_alt *alt;
int idx, ret;
INIT_LIST_HEAD(alts);
for (entry = entries; entry->sec; entry++) {
sec = find_section_by_name(elf, entry->sec);
if (!sec)
continue;
if (sec->sh.sh_size % entry->size != 0) {
WARN("%s size not a multiple of %d",
sec->name, entry->size);
return -1;
}
nr_entries = sec->sh.sh_size / entry->size;
for (idx = 0; idx < nr_entries; idx++) {
alt = malloc(sizeof(*alt));
if (!alt) {
WARN("malloc failed");
return -1;
}
memset(alt, 0, sizeof(*alt));
ret = get_alt_entry(elf, entry, sec, idx, alt);
if (ret > 0)
continue;
if (ret < 0)
return ret;
list_add_tail(&alt->list, alts);
}
}
return 0;
}
| linux-master | tools/objtool/special.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright (C) 2017 Josh Poimboeuf <[email protected]>
*/
#include <stdlib.h>
#include <string.h>
#include <linux/objtool_types.h>
#include <asm/orc_types.h>
#include <objtool/check.h>
#include <objtool/warn.h>
#include <objtool/endianness.h>
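/*
 * Translate an objtool CFI state into an ORC entry: the unwind type, where to
 * find the caller's stack pointer (CFA base reg + offset), and where the
 * previous frame pointer was saved.
 */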
static int init_orc_entry(struct orc_entry *orc, struct cfi_state *cfi,
struct instruction *insn)
{
	struct cfi_reg *bp;
memset(orc, 0, sizeof(*orc));
if (!cfi) {
/*
* This is usually either unreachable nops/traps (which don't
* trigger unreachable instruction warnings), or
* STACK_FRAME_NON_STANDARD functions.
*/
orc->type = ORC_TYPE_UNDEFINED;
return 0;
	}

	bp = &cfi->regs[CFI_BP];
switch (cfi->type) {
case UNWIND_HINT_TYPE_UNDEFINED:
orc->type = ORC_TYPE_UNDEFINED;
return 0;
case UNWIND_HINT_TYPE_END_OF_STACK:
orc->type = ORC_TYPE_END_OF_STACK;
return 0;
case UNWIND_HINT_TYPE_CALL:
orc->type = ORC_TYPE_CALL;
break;
case UNWIND_HINT_TYPE_REGS:
orc->type = ORC_TYPE_REGS;
break;
case UNWIND_HINT_TYPE_REGS_PARTIAL:
orc->type = ORC_TYPE_REGS_PARTIAL;
break;
default:
WARN_INSN(insn, "unknown unwind hint type %d", cfi->type);
return -1;
}
orc->signal = cfi->signal;
switch (cfi->cfa.base) {
case CFI_SP:
orc->sp_reg = ORC_REG_SP;
break;
case CFI_SP_INDIRECT:
orc->sp_reg = ORC_REG_SP_INDIRECT;
break;
case CFI_BP:
orc->sp_reg = ORC_REG_BP;
break;
case CFI_BP_INDIRECT:
orc->sp_reg = ORC_REG_BP_INDIRECT;
break;
case CFI_R10:
orc->sp_reg = ORC_REG_R10;
break;
case CFI_R13:
orc->sp_reg = ORC_REG_R13;
break;
case CFI_DI:
orc->sp_reg = ORC_REG_DI;
break;
case CFI_DX:
orc->sp_reg = ORC_REG_DX;
break;
default:
WARN_INSN(insn, "unknown CFA base reg %d", cfi->cfa.base);
return -1;
}
switch (bp->base) {
case CFI_UNDEFINED:
orc->bp_reg = ORC_REG_UNDEFINED;
break;
case CFI_CFA:
orc->bp_reg = ORC_REG_PREV_SP;
break;
case CFI_BP:
orc->bp_reg = ORC_REG_BP;
break;
default:
WARN_INSN(insn, "unknown BP base reg %d", bp->base);
return -1;
}
orc->sp_offset = cfi->cfa.offset;
orc->bp_offset = bp->offset;
return 0;
}
static int write_orc_entry(struct elf *elf, struct section *orc_sec,
struct section *ip_sec, unsigned int idx,
struct section *insn_sec, unsigned long insn_off,
struct orc_entry *o)
{
struct orc_entry *orc;
/* populate ORC data */
orc = (struct orc_entry *)orc_sec->data->d_buf + idx;
memcpy(orc, o, sizeof(*orc));
orc->sp_offset = bswap_if_needed(elf, orc->sp_offset);
orc->bp_offset = bswap_if_needed(elf, orc->bp_offset);
/* populate reloc for ip */
if (!elf_init_reloc_text_sym(elf, ip_sec, idx * sizeof(int), idx,
insn_sec, insn_off))
return -1;
return 0;
}
struct orc_list_entry {
struct list_head list;
struct orc_entry orc;
struct section *insn_sec;
unsigned long insn_off;
};
static int orc_list_add(struct list_head *orc_list, struct orc_entry *orc,
struct section *sec, unsigned long offset)
{
struct orc_list_entry *entry = malloc(sizeof(*entry));
if (!entry) {
WARN("malloc failed");
return -1;
}
entry->orc = *orc;
entry->insn_sec = sec;
entry->insn_off = offset;
list_add_tail(&entry->list, orc_list);
return 0;
}
static unsigned long alt_group_len(struct alt_group *alt_group)
{
return alt_group->last_insn->offset +
alt_group->last_insn->len -
alt_group->first_insn->offset;
}
int orc_create(struct objtool_file *file)
{
struct section *sec, *orc_sec;
unsigned int nr = 0, idx = 0;
struct orc_list_entry *entry;
struct list_head orc_list;
struct orc_entry null = { .type = ORC_TYPE_UNDEFINED };
/* Build a deduplicated list of ORC entries: */
INIT_LIST_HEAD(&orc_list);
for_each_sec(file, sec) {
struct orc_entry orc, prev_orc = {0};
struct instruction *insn;
bool empty = true;
if (!sec->text)
continue;
sec_for_each_insn(file, sec, insn) {
struct alt_group *alt_group = insn->alt_group;
int i;
if (!alt_group) {
if (init_orc_entry(&orc, insn->cfi, insn))
return -1;
if (!memcmp(&prev_orc, &orc, sizeof(orc)))
continue;
if (orc_list_add(&orc_list, &orc, sec,
insn->offset))
return -1;
nr++;
prev_orc = orc;
empty = false;
continue;
}
/*
* Alternatives can have different stack layout
* possibilities (but they shouldn't conflict).
* Instead of traversing the instructions, use the
* alt_group's flattened byte-offset-addressed CFI
* array.
*/
for (i = 0; i < alt_group_len(alt_group); i++) {
struct cfi_state *cfi = alt_group->cfi[i];
if (!cfi)
continue;
/* errors are reported on the original insn */
if (init_orc_entry(&orc, cfi, insn))
return -1;
if (!memcmp(&prev_orc, &orc, sizeof(orc)))
continue;
if (orc_list_add(&orc_list, &orc, insn->sec,
insn->offset + i))
return -1;
nr++;
prev_orc = orc;
empty = false;
}
/* Skip to the end of the alt_group */
insn = alt_group->last_insn;
}
/* Add a section terminator */
if (!empty) {
orc_list_add(&orc_list, &null, sec, sec->sh.sh_size);
nr++;
}
}
if (!nr)
return 0;
/* Create .orc_unwind, .orc_unwind_ip and .rela.orc_unwind_ip sections: */
sec = find_section_by_name(file->elf, ".orc_unwind");
if (sec) {
WARN("file already has .orc_unwind section, skipping");
return -1;
}
orc_sec = elf_create_section(file->elf, ".orc_unwind",
sizeof(struct orc_entry), nr);
if (!orc_sec)
return -1;
sec = elf_create_section_pair(file->elf, ".orc_unwind_ip", sizeof(int), nr, nr);
if (!sec)
return -1;
/* Write ORC entries to sections: */
list_for_each_entry(entry, &orc_list, list) {
if (write_orc_entry(file->elf, orc_sec, sec, idx++,
entry->insn_sec, entry->insn_off,
&entry->orc))
return -1;
}
return 0;
}
| linux-master | tools/objtool/orc_gen.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright (C) 2017 Josh Poimboeuf <[email protected]>
*/
#include <unistd.h>
#include <asm/orc_types.h>
#include <objtool/objtool.h>
#include <objtool/warn.h>
#include <objtool/endianness.h>
static const char *reg_name(unsigned int reg)
{
switch (reg) {
case ORC_REG_PREV_SP:
return "prevsp";
case ORC_REG_DX:
return "dx";
case ORC_REG_DI:
return "di";
case ORC_REG_BP:
return "bp";
case ORC_REG_SP:
return "sp";
case ORC_REG_R10:
return "r10";
case ORC_REG_R13:
return "r13";
case ORC_REG_BP_INDIRECT:
return "bp(ind)";
case ORC_REG_SP_INDIRECT:
return "sp(ind)";
default:
return "?";
}
}
static const char *orc_type_name(unsigned int type)
{
switch (type) {
case ORC_TYPE_UNDEFINED:
return "(und)";
case ORC_TYPE_END_OF_STACK:
return "end";
case ORC_TYPE_CALL:
return "call";
case ORC_TYPE_REGS:
return "regs";
case ORC_TYPE_REGS_PARTIAL:
return "regs (partial)";
default:
return "?";
}
}
static void print_reg(unsigned int reg, int offset)
{
if (reg == ORC_REG_BP_INDIRECT)
printf("(bp%+d)", offset);
else if (reg == ORC_REG_SP_INDIRECT)
printf("(sp)%+d", offset);
else if (reg == ORC_REG_UNDEFINED)
printf("(und)");
else
printf("%s%+d", reg_name(reg), offset);
}
int orc_dump(const char *_objname)
{
int fd, nr_entries, i, *orc_ip = NULL, orc_size = 0;
struct orc_entry *orc = NULL;
char *name;
size_t nr_sections;
Elf64_Addr orc_ip_addr = 0;
size_t shstrtab_idx, strtab_idx = 0;
Elf *elf;
Elf_Scn *scn;
GElf_Shdr sh;
GElf_Rela rela;
GElf_Sym sym;
Elf_Data *data, *symtab = NULL, *rela_orc_ip = NULL;
struct elf dummy_elf = {};
objname = _objname;
elf_version(EV_CURRENT);
fd = open(objname, O_RDONLY);
if (fd == -1) {
perror("open");
return -1;
}
elf = elf_begin(fd, ELF_C_READ_MMAP, NULL);
if (!elf) {
WARN_ELF("elf_begin");
return -1;
}
if (!elf64_getehdr(elf)) {
WARN_ELF("elf64_getehdr");
return -1;
}
memcpy(&dummy_elf.ehdr, elf64_getehdr(elf), sizeof(dummy_elf.ehdr));
if (elf_getshdrnum(elf, &nr_sections)) {
WARN_ELF("elf_getshdrnum");
return -1;
}
if (elf_getshdrstrndx(elf, &shstrtab_idx)) {
WARN_ELF("elf_getshdrstrndx");
return -1;
}
for (i = 0; i < nr_sections; i++) {
scn = elf_getscn(elf, i);
if (!scn) {
WARN_ELF("elf_getscn");
return -1;
}
if (!gelf_getshdr(scn, &sh)) {
WARN_ELF("gelf_getshdr");
return -1;
}
name = elf_strptr(elf, shstrtab_idx, sh.sh_name);
if (!name) {
WARN_ELF("elf_strptr");
return -1;
}
data = elf_getdata(scn, NULL);
if (!data) {
WARN_ELF("elf_getdata");
return -1;
}
if (!strcmp(name, ".symtab")) {
symtab = data;
} else if (!strcmp(name, ".strtab")) {
strtab_idx = i;
} else if (!strcmp(name, ".orc_unwind")) {
orc = data->d_buf;
orc_size = sh.sh_size;
} else if (!strcmp(name, ".orc_unwind_ip")) {
orc_ip = data->d_buf;
orc_ip_addr = sh.sh_addr;
} else if (!strcmp(name, ".rela.orc_unwind_ip")) {
rela_orc_ip = data;
}
}
if (!symtab || !strtab_idx || !orc || !orc_ip)
return 0;
if (orc_size % sizeof(*orc) != 0) {
WARN("bad .orc_unwind section size");
return -1;
}
nr_entries = orc_size / sizeof(*orc);
for (i = 0; i < nr_entries; i++) {
if (rela_orc_ip) {
if (!gelf_getrela(rela_orc_ip, i, &rela)) {
WARN_ELF("gelf_getrela");
return -1;
}
if (!gelf_getsym(symtab, GELF_R_SYM(rela.r_info), &sym)) {
WARN_ELF("gelf_getsym");
return -1;
}
if (GELF_ST_TYPE(sym.st_info) == STT_SECTION) {
scn = elf_getscn(elf, sym.st_shndx);
if (!scn) {
WARN_ELF("elf_getscn");
return -1;
}
if (!gelf_getshdr(scn, &sh)) {
WARN_ELF("gelf_getshdr");
return -1;
}
name = elf_strptr(elf, shstrtab_idx, sh.sh_name);
if (!name) {
WARN_ELF("elf_strptr");
return -1;
}
} else {
name = elf_strptr(elf, strtab_idx, sym.st_name);
if (!name) {
WARN_ELF("elf_strptr");
return -1;
}
}
printf("%s+%llx:", name, (unsigned long long)rela.r_addend);
} else {
printf("%llx:", (unsigned long long)(orc_ip_addr + (i * sizeof(int)) + orc_ip[i]));
}
printf("type:%s", orc_type_name(orc[i].type));
printf(" sp:");
print_reg(orc[i].sp_reg, bswap_if_needed(&dummy_elf, orc[i].sp_offset));
printf(" bp:");
print_reg(orc[i].bp_reg, bswap_if_needed(&dummy_elf, orc[i].bp_offset));
printf(" signal:%d\n", orc[i].signal);
}
elf_end(elf);
close(fd);
return 0;
}
| linux-master | tools/objtool/orc_dump.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* elf.c - ELF access library
*
* Adapted from kpatch (https://github.com/dynup/kpatch):
* Copyright (C) 2013-2015 Josh Poimboeuf <[email protected]>
* Copyright (C) 2014 Seth Jennings <[email protected]>
*/
#include <sys/types.h>
#include <sys/stat.h>
#include <sys/mman.h>
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <errno.h>
#include <linux/interval_tree_generic.h>
#include <objtool/builtin.h>
#include <objtool/elf.h>
#include <objtool/warn.h>
#define MAX_NAME_LEN 128
static inline u32 str_hash(const char *str)
{
return jhash(str, strlen(str), 0);
}
#define __elf_table(name) (elf->name##_hash)
#define __elf_bits(name) (elf->name##_bits)
#define __elf_table_entry(name, key) \
__elf_table(name)[hash_min(key, __elf_bits(name))]
#define elf_hash_add(name, node, key) \
({ \
struct elf_hash_node *__node = node; \
__node->next = __elf_table_entry(name, key); \
__elf_table_entry(name, key) = __node; \
})
static inline void __elf_hash_del(struct elf_hash_node *node,
struct elf_hash_node **head)
{
struct elf_hash_node *cur, *prev;
if (node == *head) {
*head = node->next;
return;
}
for (prev = NULL, cur = *head; cur; prev = cur, cur = cur->next) {
if (cur == node) {
prev->next = cur->next;
break;
}
}
}
#define elf_hash_del(name, node, key) \
__elf_hash_del(node, &__elf_table_entry(name, key))
#define elf_list_entry(ptr, type, member) \
({ \
typeof(ptr) __ptr = (ptr); \
__ptr ? container_of(__ptr, type, member) : NULL; \
})
#define elf_hash_for_each_possible(name, obj, member, key) \
for (obj = elf_list_entry(__elf_table_entry(name, key), typeof(*obj), member); \
obj; \
obj = elf_list_entry(obj->member.next, typeof(*(obj)), member))
#define elf_alloc_hash(name, size) \
({ \
__elf_bits(name) = max(10, ilog2(size)); \
__elf_table(name) = mmap(NULL, sizeof(struct elf_hash_node *) << __elf_bits(name), \
PROT_READ|PROT_WRITE, \
MAP_PRIVATE|MAP_ANON, -1, 0); \
if (__elf_table(name) == (void *)-1L) { \
WARN("mmap fail " #name); \
__elf_table(name) = NULL; \
} \
__elf_table(name); \
})
static inline unsigned long __sym_start(struct symbol *s)
{
return s->offset;
}
static inline unsigned long __sym_last(struct symbol *s)
{
return s->offset + s->len - 1;
}
INTERVAL_TREE_DEFINE(struct symbol, node, unsigned long, __subtree_last,
__sym_start, __sym_last, static, __sym)
#define __sym_for_each(_iter, _tree, _start, _end) \
for (_iter = __sym_iter_first((_tree), (_start), (_end)); \
_iter; _iter = __sym_iter_next(_iter, (_start), (_end)))
struct symbol_hole {
unsigned long key;
const struct symbol *sym;
};
/*
* Find !section symbol where @offset is after it.
*/
static int symbol_hole_by_offset(const void *key, const struct rb_node *node)
{
const struct symbol *s = rb_entry(node, struct symbol, node);
struct symbol_hole *sh = (void *)key;
if (sh->key < s->offset)
return -1;
if (sh->key >= s->offset + s->len) {
if (s->type != STT_SECTION)
sh->sym = s;
return 1;
}
return 0;
}
struct section *find_section_by_name(const struct elf *elf, const char *name)
{
struct section *sec;
elf_hash_for_each_possible(section_name, sec, name_hash, str_hash(name)) {
if (!strcmp(sec->name, name))
return sec;
}
return NULL;
}
static struct section *find_section_by_index(struct elf *elf,
unsigned int idx)
{
struct section *sec;
elf_hash_for_each_possible(section, sec, hash, idx) {
if (sec->idx == idx)
return sec;
}
return NULL;
}
static struct symbol *find_symbol_by_index(struct elf *elf, unsigned int idx)
{
struct symbol *sym;
elf_hash_for_each_possible(symbol, sym, hash, idx) {
if (sym->idx == idx)
return sym;
}
return NULL;
}
struct symbol *find_symbol_by_offset(struct section *sec, unsigned long offset)
{
struct rb_root_cached *tree = (struct rb_root_cached *)&sec->symbol_tree;
struct symbol *iter;
__sym_for_each(iter, tree, offset, offset) {
if (iter->offset == offset && iter->type != STT_SECTION)
return iter;
}
return NULL;
}
struct symbol *find_func_by_offset(struct section *sec, unsigned long offset)
{
struct rb_root_cached *tree = (struct rb_root_cached *)&sec->symbol_tree;
struct symbol *iter;
__sym_for_each(iter, tree, offset, offset) {
if (iter->offset == offset && iter->type == STT_FUNC)
return iter;
}
return NULL;
}
struct symbol *find_symbol_containing(const struct section *sec, unsigned long offset)
{
struct rb_root_cached *tree = (struct rb_root_cached *)&sec->symbol_tree;
struct symbol *iter;
__sym_for_each(iter, tree, offset, offset) {
if (iter->type != STT_SECTION)
return iter;
}
return NULL;
}
/*
* Returns size of hole starting at @offset.
*/
int find_symbol_hole_containing(const struct section *sec, unsigned long offset)
{
struct symbol_hole hole = {
.key = offset,
.sym = NULL,
};
struct rb_node *n;
struct symbol *s;
/*
* Find the rightmost symbol for which @offset is after it.
*/
n = rb_find(&hole, &sec->symbol_tree.rb_root, symbol_hole_by_offset);
/* found a symbol that contains @offset */
if (n)
return 0; /* not a hole */
/* didn't find a symbol for which @offset is after it */
if (!hole.sym)
return 0; /* not a hole */
/* @offset >= sym->offset + sym->len, find symbol after it */
n = rb_next(&hole.sym->node);
if (!n)
return -1; /* until end of address space */
/* hole until start of next symbol */
s = rb_entry(n, struct symbol, node);
return s->offset - offset;
}
struct symbol *find_func_containing(struct section *sec, unsigned long offset)
{
struct rb_root_cached *tree = (struct rb_root_cached *)&sec->symbol_tree;
struct symbol *iter;
__sym_for_each(iter, tree, offset, offset) {
if (iter->type == STT_FUNC)
return iter;
}
return NULL;
}
struct symbol *find_symbol_by_name(const struct elf *elf, const char *name)
{
struct symbol *sym;
elf_hash_for_each_possible(symbol_name, sym, name_hash, str_hash(name)) {
if (!strcmp(sym->name, name))
return sym;
}
return NULL;
}
struct reloc *find_reloc_by_dest_range(const struct elf *elf, struct section *sec,
unsigned long offset, unsigned int len)
{
struct reloc *reloc, *r = NULL;
struct section *rsec;
unsigned long o;
rsec = sec->rsec;
if (!rsec)
return NULL;
for_offset_range(o, offset, offset + len) {
elf_hash_for_each_possible(reloc, reloc, hash,
sec_offset_hash(rsec, o)) {
if (reloc->sec != rsec)
continue;
if (reloc_offset(reloc) >= offset &&
reloc_offset(reloc) < offset + len) {
if (!r || reloc_offset(reloc) < reloc_offset(r))
r = reloc;
}
}
if (r)
return r;
}
return NULL;
}
struct reloc *find_reloc_by_dest(const struct elf *elf, struct section *sec, unsigned long offset)
{
return find_reloc_by_dest_range(elf, sec, offset, 1);
}
static bool is_dwarf_section(struct section *sec)
{
return !strncmp(sec->name, ".debug_", 7);
}
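/*
 * Read all section headers into elf->section_data, hash them by index and by
 * name, and grab each non-empty, non-DWARF section's data. DWARF sections are
 * skipped to avoid faulting in debug data that objtool doesn't use.
 */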
static int read_sections(struct elf *elf)
{
Elf_Scn *s = NULL;
struct section *sec;
size_t shstrndx, sections_nr;
int i;
	if (elf_getshdrnum(elf->elf, &sections_nr)) {
WARN_ELF("elf_getshdrnum");
return -1;
}
if (elf_getshdrstrndx(elf->elf, &shstrndx)) {
WARN_ELF("elf_getshdrstrndx");
return -1;
}
if (!elf_alloc_hash(section, sections_nr) ||
!elf_alloc_hash(section_name, sections_nr))
return -1;
elf->section_data = calloc(sections_nr, sizeof(*sec));
if (!elf->section_data) {
perror("calloc");
return -1;
}
for (i = 0; i < sections_nr; i++) {
sec = &elf->section_data[i];
INIT_LIST_HEAD(&sec->symbol_list);
s = elf_getscn(elf->elf, i);
if (!s) {
WARN_ELF("elf_getscn");
return -1;
}
sec->idx = elf_ndxscn(s);
if (!gelf_getshdr(s, &sec->sh)) {
WARN_ELF("gelf_getshdr");
return -1;
}
sec->name = elf_strptr(elf->elf, shstrndx, sec->sh.sh_name);
if (!sec->name) {
WARN_ELF("elf_strptr");
return -1;
}
if (sec->sh.sh_size != 0 && !is_dwarf_section(sec)) {
sec->data = elf_getdata(s, NULL);
if (!sec->data) {
WARN_ELF("elf_getdata");
return -1;
}
if (sec->data->d_off != 0 ||
sec->data->d_size != sec->sh.sh_size) {
WARN("unexpected data attributes for %s",
sec->name);
return -1;
}
}
list_add_tail(&sec->list, &elf->sections);
elf_hash_add(section, &sec->hash, sec->idx);
elf_hash_add(section_name, &sec->name_hash, str_hash(sec->name));
if (is_reloc_sec(sec))
elf->num_relocs += sec_num_entries(sec);
}
if (opts.stats) {
printf("nr_sections: %lu\n", (unsigned long)sections_nr);
printf("section_bits: %d\n", elf->section_bits);
}
/* sanity check, one more call to elf_nextscn() should return NULL */
if (elf_nextscn(elf->elf, s)) {
WARN("section entry mismatch");
return -1;
}
return 0;
}
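/*
 * Insert a symbol into its section's interval tree, ordered symbol list and
 * the global hash tables, linking same-offset/same-type symbols as aliases.
 */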
static void elf_add_symbol(struct elf *elf, struct symbol *sym)
{
struct list_head *entry;
struct rb_node *pnode;
struct symbol *iter;
INIT_LIST_HEAD(&sym->pv_target);
sym->alias = sym;
sym->type = GELF_ST_TYPE(sym->sym.st_info);
sym->bind = GELF_ST_BIND(sym->sym.st_info);
if (sym->type == STT_FILE)
elf->num_files++;
sym->offset = sym->sym.st_value;
sym->len = sym->sym.st_size;
__sym_for_each(iter, &sym->sec->symbol_tree, sym->offset, sym->offset) {
if (iter->offset == sym->offset && iter->type == sym->type)
iter->alias = sym;
}
__sym_insert(sym, &sym->sec->symbol_tree);
pnode = rb_prev(&sym->node);
if (pnode)
entry = &rb_entry(pnode, struct symbol, node)->list;
else
entry = &sym->sec->symbol_list;
list_add(&sym->list, entry);
elf_hash_add(symbol, &sym->hash, sym->idx);
elf_hash_add(symbol_name, &sym->name_hash, str_hash(sym->name));
/*
* Don't store empty STT_NOTYPE symbols in the rbtree. They
* can exist within a function, confusing the sorting.
*/
if (!sym->len)
__sym_remove(sym, &sym->sec->symbol_tree);
}
static int read_symbols(struct elf *elf)
{
struct section *symtab, *symtab_shndx, *sec;
struct symbol *sym, *pfunc;
int symbols_nr, i;
char *coldstr;
Elf_Data *shndx_data = NULL;
Elf32_Word shndx;
symtab = find_section_by_name(elf, ".symtab");
if (symtab) {
symtab_shndx = find_section_by_name(elf, ".symtab_shndx");
if (symtab_shndx)
shndx_data = symtab_shndx->data;
symbols_nr = sec_num_entries(symtab);
} else {
/*
* A missing symbol table is actually possible if it's an empty
* .o file. This can happen for thunk_64.o. Make sure to at
* least allocate the symbol hash tables so we can do symbol
* lookups without crashing.
*/
symbols_nr = 0;
}
if (!elf_alloc_hash(symbol, symbols_nr) ||
!elf_alloc_hash(symbol_name, symbols_nr))
return -1;
elf->symbol_data = calloc(symbols_nr, sizeof(*sym));
if (!elf->symbol_data) {
perror("calloc");
return -1;
}
for (i = 0; i < symbols_nr; i++) {
sym = &elf->symbol_data[i];
sym->idx = i;
if (!gelf_getsymshndx(symtab->data, shndx_data, i, &sym->sym,
&shndx)) {
WARN_ELF("gelf_getsymshndx");
goto err;
}
sym->name = elf_strptr(elf->elf, symtab->sh.sh_link,
sym->sym.st_name);
if (!sym->name) {
WARN_ELF("elf_strptr");
goto err;
}
if ((sym->sym.st_shndx > SHN_UNDEF &&
sym->sym.st_shndx < SHN_LORESERVE) ||
(shndx_data && sym->sym.st_shndx == SHN_XINDEX)) {
if (sym->sym.st_shndx != SHN_XINDEX)
shndx = sym->sym.st_shndx;
sym->sec = find_section_by_index(elf, shndx);
if (!sym->sec) {
WARN("couldn't find section for symbol %s",
sym->name);
goto err;
}
if (GELF_ST_TYPE(sym->sym.st_info) == STT_SECTION) {
sym->name = sym->sec->name;
sym->sec->sym = sym;
}
} else
sym->sec = find_section_by_index(elf, 0);
elf_add_symbol(elf, sym);
}
if (opts.stats) {
printf("nr_symbols: %lu\n", (unsigned long)symbols_nr);
printf("symbol_bits: %d\n", elf->symbol_bits);
}
/* Create parent/child links for any cold subfunctions */
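	/* e.g. GCC splits "foo.cold" out of "foo": foo.cold->pfunc = foo. */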
list_for_each_entry(sec, &elf->sections, list) {
sec_for_each_sym(sec, sym) {
char pname[MAX_NAME_LEN + 1];
size_t pnamelen;
if (sym->type != STT_FUNC)
continue;
if (sym->pfunc == NULL)
sym->pfunc = sym;
if (sym->cfunc == NULL)
sym->cfunc = sym;
coldstr = strstr(sym->name, ".cold");
if (!coldstr)
continue;
pnamelen = coldstr - sym->name;
if (pnamelen > MAX_NAME_LEN) {
WARN("%s(): parent function name exceeds maximum length of %d characters",
sym->name, MAX_NAME_LEN);
return -1;
}
strncpy(pname, sym->name, pnamelen);
pname[pnamelen] = '\0';
pfunc = find_symbol_by_name(elf, pname);
if (!pfunc) {
WARN("%s(): can't find parent function",
sym->name);
return -1;
}
sym->pfunc = pfunc;
pfunc->cfunc = sym;
/*
* Unfortunately, -fnoreorder-functions puts the child
* inside the parent. Remove the overlap so we can
* have sane assumptions.
*
* Note that pfunc->len now no longer matches
* pfunc->sym.st_size.
*/
if (sym->sec == pfunc->sec &&
sym->offset >= pfunc->offset &&
sym->offset + sym->len == pfunc->offset + pfunc->len) {
pfunc->len -= sym->len;
}
}
}
return 0;
err:
	/* @sym points into the preallocated symbol_data[] array; don't free it. */
return -1;
}
/*
* @sym's idx has changed. Update the relocs which reference it.
*/
static int elf_update_sym_relocs(struct elf *elf, struct symbol *sym)
{
struct reloc *reloc;
for (reloc = sym->relocs; reloc; reloc = reloc->sym_next_reloc)
set_reloc_sym(elf, reloc, reloc->sym->idx);
return 0;
}
/*
* The libelf API is terrible; gelf_update_sym*() takes a data block relative
* index value, *NOT* the symbol index. As such, iterate the data blocks and
* adjust index until it fits.
*
* If no data block is found, allow adding a new data block provided the index
* is only one past the end.
*/
static int elf_update_symbol(struct elf *elf, struct section *symtab,
struct section *symtab_shndx, struct symbol *sym)
{
Elf32_Word shndx = sym->sec ? sym->sec->idx : SHN_UNDEF;
Elf_Data *symtab_data = NULL, *shndx_data = NULL;
Elf64_Xword entsize = symtab->sh.sh_entsize;
int max_idx, idx = sym->idx;
Elf_Scn *s, *t = NULL;
bool is_special_shndx = sym->sym.st_shndx >= SHN_LORESERVE &&
sym->sym.st_shndx != SHN_XINDEX;
if (is_special_shndx)
shndx = sym->sym.st_shndx;
s = elf_getscn(elf->elf, symtab->idx);
if (!s) {
WARN_ELF("elf_getscn");
return -1;
}
if (symtab_shndx) {
t = elf_getscn(elf->elf, symtab_shndx->idx);
if (!t) {
WARN_ELF("elf_getscn");
return -1;
}
}
for (;;) {
/* get next data descriptor for the relevant sections */
symtab_data = elf_getdata(s, symtab_data);
if (t)
shndx_data = elf_getdata(t, shndx_data);
/* end-of-list */
if (!symtab_data) {
/*
* Over-allocate to avoid O(n^2) symbol creation
* behaviour. The down side is that libelf doesn't
* like this; see elf_truncate_section() for the fixup.
*/
int num = max(1U, sym->idx/3);
void *buf;
if (idx) {
/* we don't do holes in symbol tables */
WARN("index out of range");
return -1;
}
/* if @idx == 0, it's the next contiguous entry, create it */
symtab_data = elf_newdata(s);
if (t)
shndx_data = elf_newdata(t);
buf = calloc(num, entsize);
if (!buf) {
WARN("malloc");
return -1;
}
symtab_data->d_buf = buf;
symtab_data->d_size = num * entsize;
symtab_data->d_align = 1;
symtab_data->d_type = ELF_T_SYM;
mark_sec_changed(elf, symtab, true);
symtab->truncate = true;
if (t) {
buf = calloc(num, sizeof(Elf32_Word));
if (!buf) {
WARN("malloc");
return -1;
}
shndx_data->d_buf = buf;
shndx_data->d_size = num * sizeof(Elf32_Word);
shndx_data->d_align = sizeof(Elf32_Word);
shndx_data->d_type = ELF_T_WORD;
mark_sec_changed(elf, symtab_shndx, true);
symtab_shndx->truncate = true;
}
break;
}
/* empty blocks should not happen */
if (!symtab_data->d_size) {
WARN("zero size data");
return -1;
}
/* is this the right block? */
max_idx = symtab_data->d_size / entsize;
if (idx < max_idx)
break;
/* adjust index and try again */
idx -= max_idx;
}
	/* something went sideways */
if (idx < 0) {
WARN("negative index");
return -1;
}
/* setup extended section index magic and write the symbol */
if ((shndx >= SHN_UNDEF && shndx < SHN_LORESERVE) || is_special_shndx) {
sym->sym.st_shndx = shndx;
if (!shndx_data)
shndx = 0;
} else {
sym->sym.st_shndx = SHN_XINDEX;
if (!shndx_data) {
WARN("no .symtab_shndx");
return -1;
}
}
if (!gelf_update_symshndx(symtab_data, shndx_data, idx, &sym->sym, shndx)) {
WARN_ELF("gelf_update_symshndx");
return -1;
}
return 0;
}
static struct symbol *
__elf_create_symbol(struct elf *elf, struct symbol *sym)
{
struct section *symtab, *symtab_shndx;
Elf32_Word first_non_local, new_idx;
struct symbol *old;
symtab = find_section_by_name(elf, ".symtab");
if (symtab) {
symtab_shndx = find_section_by_name(elf, ".symtab_shndx");
} else {
WARN("no .symtab");
return NULL;
}
new_idx = sec_num_entries(symtab);
if (GELF_ST_BIND(sym->sym.st_info) != STB_LOCAL)
goto non_local;
/*
* Move the first global symbol, as per sh_info, into a new, higher
 * symbol index. This frees up a spot for a new local symbol.
*/
first_non_local = symtab->sh.sh_info;
old = find_symbol_by_index(elf, first_non_local);
if (old) {
elf_hash_del(symbol, &old->hash, old->idx);
elf_hash_add(symbol, &old->hash, new_idx);
old->idx = new_idx;
if (elf_update_symbol(elf, symtab, symtab_shndx, old)) {
WARN("elf_update_symbol move");
return NULL;
}
if (elf_update_sym_relocs(elf, old))
return NULL;
new_idx = first_non_local;
}
/*
* Either way, we will add a LOCAL symbol.
*/
symtab->sh.sh_info += 1;
non_local:
sym->idx = new_idx;
if (elf_update_symbol(elf, symtab, symtab_shndx, sym)) {
WARN("elf_update_symbol");
return NULL;
}
symtab->sh.sh_size += symtab->sh.sh_entsize;
mark_sec_changed(elf, symtab, true);
if (symtab_shndx) {
symtab_shndx->sh.sh_size += sizeof(Elf32_Word);
mark_sec_changed(elf, symtab_shndx, true);
}
return sym;
}
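/*
 * Create an STT_SECTION symbol for @sec. These serve as relocation
 * targets when symbol-based relocations won't do, e.g. for weak
 * functions (see elf_init_reloc_text_sym()).
 */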
static struct symbol *
elf_create_section_symbol(struct elf *elf, struct section *sec)
{
struct symbol *sym = calloc(1, sizeof(*sym));
if (!sym) {
perror("malloc");
return NULL;
}
sym->name = sec->name;
sym->sec = sec;
// st_name 0
sym->sym.st_info = GELF_ST_INFO(STB_LOCAL, STT_SECTION);
// st_other 0
// st_value 0
// st_size 0
sym = __elf_create_symbol(elf, sym);
if (sym)
elf_add_symbol(elf, sym);
return sym;
}
static int elf_add_string(struct elf *elf, struct section *strtab, char *str);
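/*
 * Create a "__pfx_<func>" symbol covering the @size bytes immediately
 * preceding @orig (st_value = orig->st_value - size). Presumably used to
 * name the prefix padding emitted in front of functions (--prefix).
 */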
struct symbol *
elf_create_prefix_symbol(struct elf *elf, struct symbol *orig, long size)
{
struct symbol *sym = calloc(1, sizeof(*sym));
size_t namelen = strlen(orig->name) + sizeof("__pfx_");
char *name = malloc(namelen);
if (!sym || !name) {
perror("malloc");
return NULL;
}
snprintf(name, namelen, "__pfx_%s", orig->name);
sym->name = name;
sym->sec = orig->sec;
sym->sym.st_name = elf_add_string(elf, NULL, name);
sym->sym.st_info = orig->sym.st_info;
sym->sym.st_value = orig->sym.st_value - size;
sym->sym.st_size = size;
sym = __elf_create_symbol(elf, sym);
if (sym)
elf_add_symbol(elf, sym);
return sym;
}
static struct reloc *elf_init_reloc(struct elf *elf, struct section *rsec,
unsigned int reloc_idx,
unsigned long offset, struct symbol *sym,
s64 addend, unsigned int type)
{
struct reloc *reloc, empty = { 0 };
if (reloc_idx >= sec_num_entries(rsec)) {
WARN("%s: bad reloc_idx %u for %s with %d relocs",
__func__, reloc_idx, rsec->name, sec_num_entries(rsec));
return NULL;
}
reloc = &rsec->relocs[reloc_idx];
if (memcmp(reloc, &empty, sizeof(empty))) {
WARN("%s: %s: reloc %d already initialized!",
__func__, rsec->name, reloc_idx);
return NULL;
}
reloc->sec = rsec;
reloc->sym = sym;
set_reloc_offset(elf, reloc, offset);
set_reloc_sym(elf, reloc, sym->idx);
set_reloc_type(elf, reloc, type);
set_reloc_addend(elf, reloc, addend);
elf_hash_add(reloc, &reloc->hash, reloc_hash(reloc));
reloc->sym_next_reloc = sym->relocs;
sym->relocs = reloc;
return reloc;
}
struct reloc *elf_init_reloc_text_sym(struct elf *elf, struct section *sec,
unsigned long offset,
unsigned int reloc_idx,
struct section *insn_sec,
unsigned long insn_off)
{
struct symbol *sym = insn_sec->sym;
int addend = insn_off;
	if (!(insn_sec->sh.sh_flags & SHF_EXECINSTR)) {
		/* @sym may be NULL here; fall back to the section name. */
		WARN("bad call to %s() for data symbol %s",
		     __func__, sym ? sym->name : insn_sec->name);
		return NULL;
	}
if (!sym) {
/*
* Due to how weak functions work, we must use section based
* relocations. Symbol based relocations would result in the
* weak and non-weak function annotations being overlaid on the
* non-weak function after linking.
*/
sym = elf_create_section_symbol(elf, insn_sec);
if (!sym)
return NULL;
insn_sec->sym = sym;
}
return elf_init_reloc(elf, sec->rsec, reloc_idx, offset, sym, addend,
elf_text_rela_type(elf));
}
struct reloc *elf_init_reloc_data_sym(struct elf *elf, struct section *sec,
unsigned long offset,
unsigned int reloc_idx,
struct symbol *sym,
s64 addend)
{
if (sym->sec && (sec->sh.sh_flags & SHF_EXECINSTR)) {
WARN("bad call to %s() for text symbol %s",
__func__, sym->name);
return NULL;
}
return elf_init_reloc(elf, sec->rsec, reloc_idx, offset, sym, addend,
elf_data_rela_type(elf));
}
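/*
 * Read all reloc sections: resolve each entry's symbol, add the reloc to
 * the hash, and chain it on the symbol's ->relocs list.
 */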
static int read_relocs(struct elf *elf)
{
unsigned long nr_reloc, max_reloc = 0;
struct section *rsec;
struct reloc *reloc;
unsigned int symndx;
struct symbol *sym;
int i;
if (!elf_alloc_hash(reloc, elf->num_relocs))
return -1;
list_for_each_entry(rsec, &elf->sections, list) {
if (!is_reloc_sec(rsec))
continue;
rsec->base = find_section_by_index(elf, rsec->sh.sh_info);
if (!rsec->base) {
WARN("can't find base section for reloc section %s",
rsec->name);
return -1;
}
rsec->base->rsec = rsec;
nr_reloc = 0;
rsec->relocs = calloc(sec_num_entries(rsec), sizeof(*reloc));
if (!rsec->relocs) {
perror("calloc");
return -1;
}
for (i = 0; i < sec_num_entries(rsec); i++) {
reloc = &rsec->relocs[i];
reloc->sec = rsec;
symndx = reloc_sym(reloc);
reloc->sym = sym = find_symbol_by_index(elf, symndx);
if (!reloc->sym) {
WARN("can't find reloc entry symbol %d for %s",
symndx, rsec->name);
return -1;
}
elf_hash_add(reloc, &reloc->hash, reloc_hash(reloc));
reloc->sym_next_reloc = sym->relocs;
sym->relocs = reloc;
nr_reloc++;
}
max_reloc = max(max_reloc, nr_reloc);
}
if (opts.stats) {
printf("max_reloc: %lu\n", max_reloc);
printf("num_relocs: %lu\n", elf->num_relocs);
printf("reloc_bits: %d\n", elf->reloc_bits);
}
return 0;
}
struct elf *elf_open_read(const char *name, int flags)
{
struct elf *elf;
Elf_Cmd cmd;
elf_version(EV_CURRENT);
elf = malloc(sizeof(*elf));
if (!elf) {
perror("malloc");
return NULL;
}
memset(elf, 0, sizeof(*elf));
INIT_LIST_HEAD(&elf->sections);
elf->fd = open(name, flags);
if (elf->fd == -1) {
fprintf(stderr, "objtool: Can't open '%s': %s\n",
name, strerror(errno));
goto err;
}
if ((flags & O_ACCMODE) == O_RDONLY)
cmd = ELF_C_READ_MMAP;
else if ((flags & O_ACCMODE) == O_RDWR)
cmd = ELF_C_RDWR;
else /* O_WRONLY */
cmd = ELF_C_WRITE;
elf->elf = elf_begin(elf->fd, cmd, NULL);
if (!elf->elf) {
WARN_ELF("elf_begin");
goto err;
}
if (!gelf_getehdr(elf->elf, &elf->ehdr)) {
WARN_ELF("gelf_getehdr");
goto err;
}
if (read_sections(elf))
goto err;
if (read_symbols(elf))
goto err;
if (read_relocs(elf))
goto err;
return elf;
err:
elf_close(elf);
return NULL;
}
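/*
 * Append @str to @strtab (default: .strtab) as a new Elf_Data block and
 * return its offset within the string table, or -1 on error.
 */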
static int elf_add_string(struct elf *elf, struct section *strtab, char *str)
{
Elf_Data *data;
Elf_Scn *s;
int len;
if (!strtab)
strtab = find_section_by_name(elf, ".strtab");
if (!strtab) {
WARN("can't find .strtab section");
return -1;
}
s = elf_getscn(elf->elf, strtab->idx);
if (!s) {
WARN_ELF("elf_getscn");
return -1;
}
data = elf_newdata(s);
if (!data) {
WARN_ELF("elf_newdata");
return -1;
}
data->d_buf = str;
data->d_size = strlen(str) + 1;
data->d_align = 1;
len = strtab->sh.sh_size;
strtab->sh.sh_size += data->d_size;
mark_sec_changed(elf, strtab, true);
return len;
}
struct section *elf_create_section(struct elf *elf, const char *name,
size_t entsize, unsigned int nr)
{
struct section *sec, *shstrtab;
size_t size = entsize * nr;
Elf_Scn *s;
sec = malloc(sizeof(*sec));
if (!sec) {
perror("malloc");
return NULL;
}
memset(sec, 0, sizeof(*sec));
INIT_LIST_HEAD(&sec->symbol_list);
s = elf_newscn(elf->elf);
if (!s) {
WARN_ELF("elf_newscn");
return NULL;
}
sec->name = strdup(name);
if (!sec->name) {
perror("strdup");
return NULL;
}
sec->idx = elf_ndxscn(s);
sec->data = elf_newdata(s);
if (!sec->data) {
WARN_ELF("elf_newdata");
return NULL;
}
sec->data->d_size = size;
sec->data->d_align = 1;
if (size) {
sec->data->d_buf = malloc(size);
if (!sec->data->d_buf) {
perror("malloc");
return NULL;
}
memset(sec->data->d_buf, 0, size);
}
if (!gelf_getshdr(s, &sec->sh)) {
WARN_ELF("gelf_getshdr");
return NULL;
}
sec->sh.sh_size = size;
sec->sh.sh_entsize = entsize;
sec->sh.sh_type = SHT_PROGBITS;
sec->sh.sh_addralign = 1;
sec->sh.sh_flags = SHF_ALLOC;
/* Add section name to .shstrtab (or .strtab for Clang) */
shstrtab = find_section_by_name(elf, ".shstrtab");
if (!shstrtab)
shstrtab = find_section_by_name(elf, ".strtab");
if (!shstrtab) {
WARN("can't find .shstrtab or .strtab section");
return NULL;
}
sec->sh.sh_name = elf_add_string(elf, shstrtab, sec->name);
if (sec->sh.sh_name == -1)
return NULL;
list_add_tail(&sec->list, &elf->sections);
elf_hash_add(section, &sec->hash, sec->idx);
elf_hash_add(section_name, &sec->name_hash, str_hash(sec->name));
mark_sec_changed(elf, sec, true);
return sec;
}
static struct section *elf_create_rela_section(struct elf *elf,
struct section *sec,
unsigned int reloc_nr)
{
struct section *rsec;
char *rsec_name;
rsec_name = malloc(strlen(sec->name) + strlen(".rela") + 1);
if (!rsec_name) {
perror("malloc");
return NULL;
}
strcpy(rsec_name, ".rela");
strcat(rsec_name, sec->name);
rsec = elf_create_section(elf, rsec_name, elf_rela_size(elf), reloc_nr);
free(rsec_name);
if (!rsec)
return NULL;
rsec->data->d_type = ELF_T_RELA;
rsec->sh.sh_type = SHT_RELA;
rsec->sh.sh_addralign = elf_addr_size(elf);
rsec->sh.sh_link = find_section_by_name(elf, ".symtab")->idx;
rsec->sh.sh_info = sec->idx;
rsec->sh.sh_flags = SHF_INFO_LINK;
rsec->relocs = calloc(sec_num_entries(rsec), sizeof(struct reloc));
if (!rsec->relocs) {
perror("calloc");
return NULL;
}
sec->rsec = rsec;
rsec->base = sec;
return rsec;
}
struct section *elf_create_section_pair(struct elf *elf, const char *name,
size_t entsize, unsigned int nr,
unsigned int reloc_nr)
{
struct section *sec;
sec = elf_create_section(elf, name, entsize, nr);
if (!sec)
return NULL;
if (!elf_create_rela_section(elf, sec, reloc_nr))
return NULL;
return sec;
}
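/*
 * Typical usage (sketch, mirroring the ORC generator): create a section
 * together with its .rela twin in one call, e.g.
 *
 *	sec = elf_create_section_pair(elf, ".orc_unwind_ip",
 *				      sizeof(int), nr, nr);
 */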
int elf_write_insn(struct elf *elf, struct section *sec,
unsigned long offset, unsigned int len,
const char *insn)
{
Elf_Data *data = sec->data;
if (data->d_type != ELF_T_BYTE || data->d_off) {
WARN("write to unexpected data for section: %s", sec->name);
return -1;
}
memcpy(data->d_buf + offset, insn, len);
mark_sec_changed(elf, sec, true);
return 0;
}
/*
* When Elf_Scn::sh_size is smaller than the combined Elf_Data::d_size
* do you:
*
* A) adhere to the section header and truncate the data, or
* B) ignore the section header and write out all the data you've got?
*
* Yes, libelf sucks and we need to manually truncate if we over-allocate data.
*/
static int elf_truncate_section(struct elf *elf, struct section *sec)
{
u64 size = sec->sh.sh_size;
bool truncated = false;
Elf_Data *data = NULL;
Elf_Scn *s;
s = elf_getscn(elf->elf, sec->idx);
if (!s) {
WARN_ELF("elf_getscn");
return -1;
}
for (;;) {
/* get next data descriptor for the relevant section */
data = elf_getdata(s, data);
if (!data) {
if (size) {
WARN("end of section data but non-zero size left\n");
return -1;
}
return 0;
}
if (truncated) {
/* when we remove symbols */
WARN("truncated; but more data\n");
return -1;
}
if (!data->d_size) {
WARN("zero size data");
return -1;
}
if (data->d_size > size) {
truncated = true;
data->d_size = size;
}
size -= data->d_size;
}
}
int elf_write(struct elf *elf)
{
struct section *sec;
Elf_Scn *s;
if (opts.dryrun)
return 0;
/* Update changed relocation sections and section headers: */
list_for_each_entry(sec, &elf->sections, list) {
if (sec->truncate)
elf_truncate_section(elf, sec);
if (sec_changed(sec)) {
s = elf_getscn(elf->elf, sec->idx);
if (!s) {
WARN_ELF("elf_getscn");
return -1;
}
/* Note this also flags the section dirty */
if (!gelf_update_shdr(s, &sec->sh)) {
WARN_ELF("gelf_update_shdr");
return -1;
}
mark_sec_changed(elf, sec, false);
}
}
/* Make sure the new section header entries get updated properly. */
elf_flagelf(elf->elf, ELF_C_SET, ELF_F_DIRTY);
/* Write all changes to the file. */
if (elf_update(elf->elf, ELF_C_WRITE) < 0) {
WARN_ELF("elf_update");
return -1;
}
elf->changed = false;
return 0;
}
void elf_close(struct elf *elf)
{
if (elf->elf)
elf_end(elf->elf);
if (elf->fd > 0)
close(elf->fd);
/*
* NOTE: All remaining allocations are leaked on purpose. Objtool is
* about to exit anyway.
*/
}
| linux-master | tools/objtool/elf.c |
// SPDX-License-Identifier: GPL-2.0-or-later
#include <string.h>
#include <stdlib.h>
#include <objtool/special.h>
#include <objtool/builtin.h>
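/*
 * powerpc doesn't implement alternatives or switch-table discovery yet;
 * these stubs abort hard so any unexpected caller is caught immediately.
 */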
bool arch_support_alt_relocation(struct special_alt *special_alt,
struct instruction *insn,
struct reloc *reloc)
{
exit(-1);
}
struct reloc *arch_find_switch_table(struct objtool_file *file,
struct instruction *insn)
{
exit(-1);
}
| linux-master | tools/objtool/arch/powerpc/special.c |
// SPDX-License-Identifier: GPL-2.0-or-later
#include <stdio.h>
#include <stdlib.h>
#include <objtool/check.h>
#include <objtool/elf.h>
#include <objtool/arch.h>
#include <objtool/warn.h>
#include <objtool/builtin.h>
#include <objtool/endianness.h>
int arch_ftrace_match(char *name)
{
return !strcmp(name, "_mcount");
}
unsigned long arch_dest_reloc_offset(int addend)
{
return addend;
}
bool arch_callee_saved_reg(unsigned char reg)
{
return false;
}
int arch_decode_hint_reg(u8 sp_reg, int *base)
{
exit(-1);
}
const char *arch_nop_insn(int len)
{
exit(-1);
}
const char *arch_ret_insn(int len)
{
exit(-1);
}
int arch_decode_instruction(struct objtool_file *file, const struct section *sec,
unsigned long offset, unsigned int maxlen,
struct instruction *insn)
{
unsigned int opcode;
enum insn_type typ;
unsigned long imm;
u32 ins;
ins = bswap_if_needed(file->elf, *(u32 *)(sec->data->d_buf + offset));
opcode = ins >> 26;
typ = INSN_OTHER;
imm = 0;
switch (opcode) {
case 18: /* b[l][a] */
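		/*
		 * I-form branch: the 26-bit displacement sits in bits
		 * 0x3fffffc, AA is bit 0x2 and LK is bit 0x1, so
		 * (ins & 3) == 1 is a relative branch-and-link, i.e. a
		 * call. The displacement's sign bit is 0x2000000.
		 */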
if ((ins & 3) == 1) /* bl */
typ = INSN_CALL;
imm = ins & 0x3fffffc;
if (imm & 0x2000000)
imm -= 0x4000000;
break;
}
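	/* ISA v3.1 prefixed instructions (primary opcode 1) are 8 bytes. */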
if (opcode == 1)
insn->len = 8;
else
insn->len = 4;
insn->type = typ;
insn->immediate = imm;
return 0;
}
unsigned long arch_jump_destination(struct instruction *insn)
{
return insn->offset + insn->immediate;
}
bool arch_pc_relative_reloc(struct reloc *reloc)
{
/*
* The powerpc build only allows certain relocation types, see
* relocs_check.sh, and none of those accepted are PC relative.
*/
return false;
}
void arch_initial_func_cfi_state(struct cfi_init_state *state)
{
int i;
for (i = 0; i < CFI_NUM_REGS; i++) {
state->regs[i].base = CFI_UNDEFINED;
state->regs[i].offset = 0;
}
/* initial CFA (call frame address) */
state->cfa.base = CFI_SP;
state->cfa.offset = 0;
/* initial LR (return address) */
state->regs[CFI_RA].base = CFI_CFA;
state->regs[CFI_RA].offset = 0;
}
| linux-master | tools/objtool/arch/powerpc/decode.c |
// SPDX-License-Identifier: GPL-2.0-or-later
#include <string.h>
#include <objtool/special.h>
#include <objtool/builtin.h>
#define X86_FEATURE_POPCNT (4 * 32 + 23)
#define X86_FEATURE_SMAP (9 * 32 + 20)
void arch_handle_alternative(unsigned short feature, struct special_alt *alt)
{
switch (feature) {
case X86_FEATURE_SMAP:
/*
* If UACCESS validation is enabled; force that alternative;
* otherwise force it the other way.
*
* What we want to avoid is having both the original and the
* alternative code flow at the same time, in that case we can
* find paths that see the STAC but take the NOP instead of
* CLAC and the other way around.
*/
if (opts.uaccess)
alt->skip_orig = true;
else
alt->skip_alt = true;
break;
case X86_FEATURE_POPCNT:
/*
* It has been requested that we don't validate the !POPCNT
* feature path which is a "very very small percentage of
* machines".
*/
alt->skip_orig = true;
break;
default:
break;
}
}
bool arch_support_alt_relocation(struct special_alt *special_alt,
struct instruction *insn,
struct reloc *reloc)
{
return true;
}
/*
* There are 3 basic jump table patterns:
*
* 1. jmpq *[rodata addr](,%reg,8)
*
* This is the most common case by far. It jumps to an address in a simple
* jump table which is stored in .rodata.
*
* 2. jmpq *[rodata addr](%rip)
*
* This is caused by a rare GCC quirk, currently only seen in three driver
* functions in the kernel, only with certain obscure non-distro configs.
*
* As part of an optimization, GCC makes a copy of an existing switch jump
* table, modifies it, and then hard-codes the jump (albeit with an indirect
* jump) to use a single entry in the table. The rest of the jump table and
* some of its jump targets remain as dead code.
*
* In such a case we can just crudely ignore all unreachable instruction
* warnings for the entire object file. Ideally we would just ignore them
* for the function, but that would require redesigning the code quite a
* bit. And honestly that's just not worth doing: unreachable instruction
* warnings are of questionable value anyway, and this is such a rare issue.
*
* 3. mov [rodata addr],%reg1
* ... some instructions ...
* jmpq *(%reg1,%reg2,8)
*
* This is a fairly uncommon pattern which is new for GCC 6. As of this
* writing, there are 11 occurrences of it in the allmodconfig kernel.
*
* As of GCC 7 there are quite a few more of these and the 'in between' code
* is significant. Esp. with KASAN enabled some of the code between the mov
* and jmpq uses .rodata itself, which can confuse things.
*
* TODO: Once we have DWARF CFI and smarter instruction decoding logic,
* ensure the same register is used in the mov and jump instructions.
*
* NOTE: RETPOLINE made it harder still to decode dynamic jumps.
*/
struct reloc *arch_find_switch_table(struct objtool_file *file,
struct instruction *insn)
{
struct reloc *text_reloc, *rodata_reloc;
struct section *table_sec;
unsigned long table_offset;
/* look for a relocation which references .rodata */
text_reloc = find_reloc_by_dest_range(file->elf, insn->sec,
insn->offset, insn->len);
if (!text_reloc || text_reloc->sym->type != STT_SECTION ||
!text_reloc->sym->sec->rodata)
return NULL;
table_offset = reloc_addend(text_reloc);
table_sec = text_reloc->sym->sec;
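	/*
	 * A PC32 addend is biased by -4 (it's relative to the end of the
	 * 4-byte field); undo that to recover the real table offset.
	 */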
if (reloc_type(text_reloc) == R_X86_64_PC32)
table_offset += 4;
/*
* Make sure the .rodata address isn't associated with a
* symbol. GCC jump tables are anonymous data.
*
* Also support C jump tables which are in the same format as
* switch jump tables. For objtool to recognize them, they
* need to be placed in the C_JUMP_TABLE_SECTION section. They
* have symbols associated with them.
*/
if (find_symbol_containing(table_sec, table_offset) &&
strcmp(table_sec->name, C_JUMP_TABLE_SECTION))
return NULL;
/*
* Each table entry has a rela associated with it. The rela
* should reference text in the same function as the original
* instruction.
*/
rodata_reloc = find_reloc_by_dest(file->elf, table_sec, table_offset);
if (!rodata_reloc)
return NULL;
/*
* Use of RIP-relative switch jumps is quite rare, and
* indicates a rare GCC quirk/bug which can leave dead
* code behind.
*/
if (reloc_type(text_reloc) == R_X86_64_PC32)
file->ignore_unreachables = true;
return rodata_reloc;
}
| linux-master | tools/objtool/arch/x86/special.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright (C) 2015 Josh Poimboeuf <[email protected]>
*/
#include <stdio.h>
#include <stdlib.h>
#define unlikely(cond) (cond)
#include <asm/insn.h>
#include "../../../arch/x86/lib/inat.c"
#include "../../../arch/x86/lib/insn.c"
#define CONFIG_64BIT 1
#include <asm/nops.h>
#include <asm/orc_types.h>
#include <objtool/check.h>
#include <objtool/elf.h>
#include <objtool/arch.h>
#include <objtool/warn.h>
#include <objtool/endianness.h>
#include <objtool/builtin.h>
#include <arch/elf.h>
int arch_ftrace_match(char *name)
{
return !strcmp(name, "__fentry__");
}
static int is_x86_64(const struct elf *elf)
{
switch (elf->ehdr.e_machine) {
case EM_X86_64:
return 1;
case EM_386:
return 0;
default:
WARN("unexpected ELF machine type %d", elf->ehdr.e_machine);
return -1;
}
}
bool arch_callee_saved_reg(unsigned char reg)
{
switch (reg) {
case CFI_BP:
case CFI_BX:
case CFI_R12:
case CFI_R13:
case CFI_R14:
case CFI_R15:
return true;
case CFI_AX:
case CFI_CX:
case CFI_DX:
case CFI_SI:
case CFI_DI:
case CFI_SP:
case CFI_R8:
case CFI_R9:
case CFI_R10:
case CFI_R11:
case CFI_RA:
default:
return false;
}
}
unsigned long arch_dest_reloc_offset(int addend)
{
return addend + 4;
}
unsigned long arch_jump_destination(struct instruction *insn)
{
return insn->offset + insn->len + insn->immediate;
}
bool arch_pc_relative_reloc(struct reloc *reloc)
{
/*
* All relocation types where P (the address of the target)
* is included in the computation.
*/
switch (reloc_type(reloc)) {
case R_X86_64_PC8:
case R_X86_64_PC16:
case R_X86_64_PC32:
case R_X86_64_PC64:
case R_X86_64_PLT32:
case R_X86_64_GOTPC32:
case R_X86_64_GOTPCREL:
return true;
default:
break;
}
return false;
}
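/*
 * ADD_OP(op) { ... } allocates a stack_op, appends it to the insn's
 * ->stack_ops list (ops_list tracks the tail's next pointer), then runs
 * the attached block exactly once: the degenerate for loop binds @op for
 * one pass and clears it afterwards.
 */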
#define ADD_OP(op) \
if (!(op = calloc(1, sizeof(*op)))) \
return -1; \
else for (*ops_list = op, ops_list = &op->next; op; op = NULL)
/*
* Helpers to decode ModRM/SIB:
*
* r/m| AX CX DX BX | SP | BP | SI DI |
* | R8 R9 R10 R11 | R12 | R13 | R14 R15 |
* Mod+----------------+-----+-----+---------+
* 00 | [r/m] |[SIB]|[IP+]| [r/m] |
* 01 | [r/m + d8] |[S+d]| [r/m + d8] |
* 10 | [r/m + d32] |[S+D]| [r/m + d32] |
* 11 | r/ m |
*/
#define mod_is_mem() (modrm_mod != 3)
#define mod_is_reg() (modrm_mod == 3)
#define is_RIP() ((modrm_rm & 7) == CFI_BP && modrm_mod == 0)
#define have_SIB() ((modrm_rm & 7) == CFI_SP && mod_is_mem())
#define rm_is(reg) (have_SIB() ? \
sib_base == (reg) && sib_index == CFI_SP : \
modrm_rm == (reg))
#define rm_is_mem(reg) (mod_is_mem() && !is_RIP() && rm_is(reg))
#define rm_is_reg(reg) (mod_is_reg() && modrm_rm == (reg))
static bool has_notrack_prefix(struct insn *insn)
{
int i;
for (i = 0; i < insn->prefixes.nbytes; i++) {
if (insn->prefixes.bytes[i] == 0x3e)
return true;
}
return false;
}
int arch_decode_instruction(struct objtool_file *file, const struct section *sec,
unsigned long offset, unsigned int maxlen,
struct instruction *insn)
{
struct stack_op **ops_list = &insn->stack_ops;
const struct elf *elf = file->elf;
struct insn ins;
int x86_64, ret;
unsigned char op1, op2, op3, prefix,
rex = 0, rex_b = 0, rex_r = 0, rex_w = 0, rex_x = 0,
modrm = 0, modrm_mod = 0, modrm_rm = 0, modrm_reg = 0,
sib = 0, /* sib_scale = 0, */ sib_index = 0, sib_base = 0;
struct stack_op *op = NULL;
struct symbol *sym;
u64 imm;
x86_64 = is_x86_64(elf);
if (x86_64 == -1)
return -1;
ret = insn_decode(&ins, sec->data->d_buf + offset, maxlen,
x86_64 ? INSN_MODE_64 : INSN_MODE_32);
if (ret < 0) {
WARN("can't decode instruction at %s:0x%lx", sec->name, offset);
return -1;
}
insn->len = ins.length;
insn->type = INSN_OTHER;
if (ins.vex_prefix.nbytes)
return 0;
prefix = ins.prefixes.bytes[0];
op1 = ins.opcode.bytes[0];
op2 = ins.opcode.bytes[1];
op3 = ins.opcode.bytes[2];
if (ins.rex_prefix.nbytes) {
rex = ins.rex_prefix.bytes[0];
rex_w = X86_REX_W(rex) >> 3;
rex_r = X86_REX_R(rex) >> 2;
rex_x = X86_REX_X(rex) >> 1;
rex_b = X86_REX_B(rex);
}
if (ins.modrm.nbytes) {
modrm = ins.modrm.bytes[0];
modrm_mod = X86_MODRM_MOD(modrm);
modrm_reg = X86_MODRM_REG(modrm) + 8*rex_r;
modrm_rm = X86_MODRM_RM(modrm) + 8*rex_b;
}
if (ins.sib.nbytes) {
sib = ins.sib.bytes[0];
/* sib_scale = X86_SIB_SCALE(sib); */
sib_index = X86_SIB_INDEX(sib) + 8*rex_x;
sib_base = X86_SIB_BASE(sib) + 8*rex_b;
}
switch (op1) {
case 0x1:
case 0x29:
if (rex_w && rm_is_reg(CFI_SP)) {
/* add/sub reg, %rsp */
ADD_OP(op) {
op->src.type = OP_SRC_ADD;
op->src.reg = modrm_reg;
op->dest.type = OP_DEST_REG;
op->dest.reg = CFI_SP;
}
}
break;
case 0x50 ... 0x57:
/* push reg */
ADD_OP(op) {
op->src.type = OP_SRC_REG;
op->src.reg = (op1 & 0x7) + 8*rex_b;
op->dest.type = OP_DEST_PUSH;
}
break;
case 0x58 ... 0x5f:
/* pop reg */
ADD_OP(op) {
op->src.type = OP_SRC_POP;
op->dest.type = OP_DEST_REG;
op->dest.reg = (op1 & 0x7) + 8*rex_b;
}
break;
case 0x68:
case 0x6a:
/* push immediate */
ADD_OP(op) {
op->src.type = OP_SRC_CONST;
op->dest.type = OP_DEST_PUSH;
}
break;
case 0x70 ... 0x7f:
insn->type = INSN_JUMP_CONDITIONAL;
break;
case 0x80 ... 0x83:
/*
* 1000 00sw : mod OP r/m : immediate
*
* s - sign extend immediate
* w - imm8 / imm32
*
* OP: 000 ADD 100 AND
* 001 OR 101 SUB
* 010 ADC 110 XOR
* 011 SBB 111 CMP
*/
/* 64bit only */
if (!rex_w)
break;
/* %rsp target only */
if (!rm_is_reg(CFI_SP))
break;
imm = ins.immediate.value;
if (op1 & 2) { /* sign extend */
if (op1 & 1) { /* imm32 */
imm <<= 32;
imm = (s64)imm >> 32;
} else { /* imm8 */
imm <<= 56;
imm = (s64)imm >> 56;
}
}
switch (modrm_reg & 7) {
case 5:
imm = -imm;
/* fallthrough */
case 0:
/* add/sub imm, %rsp */
ADD_OP(op) {
op->src.type = OP_SRC_ADD;
op->src.reg = CFI_SP;
op->src.offset = imm;
op->dest.type = OP_DEST_REG;
op->dest.reg = CFI_SP;
}
break;
case 4:
/* and imm, %rsp */
ADD_OP(op) {
op->src.type = OP_SRC_AND;
op->src.reg = CFI_SP;
op->src.offset = ins.immediate.value;
op->dest.type = OP_DEST_REG;
op->dest.reg = CFI_SP;
}
break;
default:
/* WARN ? */
break;
}
break;
case 0x89:
if (!rex_w)
break;
if (modrm_reg == CFI_SP) {
if (mod_is_reg()) {
/* mov %rsp, reg */
ADD_OP(op) {
op->src.type = OP_SRC_REG;
op->src.reg = CFI_SP;
op->dest.type = OP_DEST_REG;
op->dest.reg = modrm_rm;
}
break;
} else {
/* skip RIP relative displacement */
if (is_RIP())
break;
/* skip nontrivial SIB */
if (have_SIB()) {
modrm_rm = sib_base;
if (sib_index != CFI_SP)
break;
}
/* mov %rsp, disp(%reg) */
ADD_OP(op) {
op->src.type = OP_SRC_REG;
op->src.reg = CFI_SP;
op->dest.type = OP_DEST_REG_INDIRECT;
op->dest.reg = modrm_rm;
op->dest.offset = ins.displacement.value;
}
break;
}
break;
}
if (rm_is_reg(CFI_SP)) {
/* mov reg, %rsp */
ADD_OP(op) {
op->src.type = OP_SRC_REG;
op->src.reg = modrm_reg;
op->dest.type = OP_DEST_REG;
op->dest.reg = CFI_SP;
}
break;
}
/* fallthrough */
case 0x88:
if (!rex_w)
break;
if (rm_is_mem(CFI_BP)) {
/* mov reg, disp(%rbp) */
ADD_OP(op) {
op->src.type = OP_SRC_REG;
op->src.reg = modrm_reg;
op->dest.type = OP_DEST_REG_INDIRECT;
op->dest.reg = CFI_BP;
op->dest.offset = ins.displacement.value;
}
break;
}
if (rm_is_mem(CFI_SP)) {
/* mov reg, disp(%rsp) */
ADD_OP(op) {
op->src.type = OP_SRC_REG;
op->src.reg = modrm_reg;
op->dest.type = OP_DEST_REG_INDIRECT;
op->dest.reg = CFI_SP;
op->dest.offset = ins.displacement.value;
}
break;
}
break;
case 0x8b:
if (!rex_w)
break;
if (rm_is_mem(CFI_BP)) {
/* mov disp(%rbp), reg */
ADD_OP(op) {
op->src.type = OP_SRC_REG_INDIRECT;
op->src.reg = CFI_BP;
op->src.offset = ins.displacement.value;
op->dest.type = OP_DEST_REG;
op->dest.reg = modrm_reg;
}
break;
}
if (rm_is_mem(CFI_SP)) {
/* mov disp(%rsp), reg */
ADD_OP(op) {
op->src.type = OP_SRC_REG_INDIRECT;
op->src.reg = CFI_SP;
op->src.offset = ins.displacement.value;
op->dest.type = OP_DEST_REG;
op->dest.reg = modrm_reg;
}
break;
}
break;
case 0x8d:
if (mod_is_reg()) {
WARN("invalid LEA encoding at %s:0x%lx", sec->name, offset);
break;
}
/* skip non 64bit ops */
if (!rex_w)
break;
/* skip RIP relative displacement */
if (is_RIP())
break;
/* skip nontrivial SIB */
if (have_SIB()) {
modrm_rm = sib_base;
if (sib_index != CFI_SP)
break;
}
/* lea disp(%src), %dst */
ADD_OP(op) {
op->src.offset = ins.displacement.value;
if (!op->src.offset) {
/* lea (%src), %dst */
op->src.type = OP_SRC_REG;
} else {
/* lea disp(%src), %dst */
op->src.type = OP_SRC_ADD;
}
op->src.reg = modrm_rm;
op->dest.type = OP_DEST_REG;
op->dest.reg = modrm_reg;
}
break;
case 0x8f:
/* pop to mem */
ADD_OP(op) {
op->src.type = OP_SRC_POP;
op->dest.type = OP_DEST_MEM;
}
break;
case 0x90:
insn->type = INSN_NOP;
break;
case 0x9c:
/* pushf */
ADD_OP(op) {
op->src.type = OP_SRC_CONST;
op->dest.type = OP_DEST_PUSHF;
}
break;
case 0x9d:
/* popf */
ADD_OP(op) {
op->src.type = OP_SRC_POPF;
op->dest.type = OP_DEST_MEM;
}
break;
case 0x0f:
if (op2 == 0x01) {
if (modrm == 0xca)
insn->type = INSN_CLAC;
else if (modrm == 0xcb)
insn->type = INSN_STAC;
} else if (op2 >= 0x80 && op2 <= 0x8f) {
insn->type = INSN_JUMP_CONDITIONAL;
} else if (op2 == 0x05 || op2 == 0x07 || op2 == 0x34 ||
op2 == 0x35) {
/* sysenter, sysret */
insn->type = INSN_CONTEXT_SWITCH;
} else if (op2 == 0x0b || op2 == 0xb9) {
/* ud2 */
insn->type = INSN_BUG;
} else if (op2 == 0x0d || op2 == 0x1f) {
/* nopl/nopw */
insn->type = INSN_NOP;
} else if (op2 == 0x1e) {
if (prefix == 0xf3 && (modrm == 0xfa || modrm == 0xfb))
insn->type = INSN_ENDBR;
} else if (op2 == 0x38 && op3 == 0xf8) {
if (ins.prefixes.nbytes == 1 &&
ins.prefixes.bytes[0] == 0xf2) {
/* ENQCMD cannot be used in the kernel. */
WARN("ENQCMD instruction at %s:%lx", sec->name,
offset);
}
} else if (op2 == 0xa0 || op2 == 0xa8) {
/* push fs/gs */
ADD_OP(op) {
op->src.type = OP_SRC_CONST;
op->dest.type = OP_DEST_PUSH;
}
} else if (op2 == 0xa1 || op2 == 0xa9) {
/* pop fs/gs */
ADD_OP(op) {
op->src.type = OP_SRC_POP;
op->dest.type = OP_DEST_MEM;
}
}
break;
case 0xc9:
/*
* leave
*
* equivalent to:
* mov bp, sp
* pop bp
*/
ADD_OP(op) {
op->src.type = OP_SRC_REG;
op->src.reg = CFI_BP;
op->dest.type = OP_DEST_REG;
op->dest.reg = CFI_SP;
}
ADD_OP(op) {
op->src.type = OP_SRC_POP;
op->dest.type = OP_DEST_REG;
op->dest.reg = CFI_BP;
}
break;
case 0xcc:
/* int3 */
insn->type = INSN_TRAP;
break;
case 0xe3:
/* jecxz/jrcxz */
insn->type = INSN_JUMP_CONDITIONAL;
break;
case 0xe9:
case 0xeb:
insn->type = INSN_JUMP_UNCONDITIONAL;
break;
case 0xc2:
case 0xc3:
insn->type = INSN_RETURN;
break;
case 0xc7: /* mov imm, r/m */
if (!opts.noinstr)
break;
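		/*
		 * Under --noinstr, match "mov $imm32, disp32(%rip)" stores
		 * into the pv_ops[] array in .init.text and record which
		 * function lands in which slot (objtool_pv_add()).
		 */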
if (ins.length == 3+4+4 && !strncmp(sec->name, ".init.text", 10)) {
struct reloc *immr, *disp;
struct symbol *func;
int idx;
immr = find_reloc_by_dest(elf, (void *)sec, offset+3);
disp = find_reloc_by_dest(elf, (void *)sec, offset+7);
if (!immr || strcmp(immr->sym->name, "pv_ops"))
break;
idx = (reloc_addend(immr) + 8) / sizeof(void *);
func = disp->sym;
if (disp->sym->type == STT_SECTION)
func = find_symbol_by_offset(disp->sym->sec, reloc_addend(disp));
if (!func) {
WARN("no func for pv_ops[]");
return -1;
}
objtool_pv_add(file, idx, func);
}
break;
case 0xcf: /* iret */
/*
* Handle sync_core(), which has an IRET to self.
* All other IRET are in STT_NONE entry code.
*/
sym = find_symbol_containing(sec, offset);
if (sym && sym->type == STT_FUNC) {
ADD_OP(op) {
/* add $40, %rsp */
op->src.type = OP_SRC_ADD;
op->src.reg = CFI_SP;
op->src.offset = 5*8;
op->dest.type = OP_DEST_REG;
op->dest.reg = CFI_SP;
}
break;
}
/* fallthrough */
case 0xca: /* retf */
case 0xcb: /* retf */
insn->type = INSN_CONTEXT_SWITCH;
break;
case 0xe0: /* loopne */
case 0xe1: /* loope */
case 0xe2: /* loop */
insn->type = INSN_JUMP_CONDITIONAL;
break;
case 0xe8:
insn->type = INSN_CALL;
/*
* For the impact on the stack, a CALL behaves like
* a PUSH of an immediate value (the return address).
*/
ADD_OP(op) {
op->src.type = OP_SRC_CONST;
op->dest.type = OP_DEST_PUSH;
}
break;
case 0xfc:
insn->type = INSN_CLD;
break;
case 0xfd:
insn->type = INSN_STD;
break;
case 0xff:
if (modrm_reg == 2 || modrm_reg == 3) {
insn->type = INSN_CALL_DYNAMIC;
if (has_notrack_prefix(&ins))
WARN("notrack prefix found at %s:0x%lx", sec->name, offset);
} else if (modrm_reg == 4) {
insn->type = INSN_JUMP_DYNAMIC;
if (has_notrack_prefix(&ins))
WARN("notrack prefix found at %s:0x%lx", sec->name, offset);
} else if (modrm_reg == 5) {
/* jmpf */
insn->type = INSN_CONTEXT_SWITCH;
} else if (modrm_reg == 6) {
/* push from mem */
ADD_OP(op) {
op->src.type = OP_SRC_CONST;
op->dest.type = OP_DEST_PUSH;
}
}
break;
default:
break;
}
insn->immediate = ins.immediate.nbytes ? ins.immediate.value : 0;
return 0;
}
void arch_initial_func_cfi_state(struct cfi_init_state *state)
{
int i;
for (i = 0; i < CFI_NUM_REGS; i++) {
state->regs[i].base = CFI_UNDEFINED;
state->regs[i].offset = 0;
}
/* initial CFA (call frame address) */
state->cfa.base = CFI_SP;
state->cfa.offset = 8;
/* initial RA (return address) */
state->regs[CFI_RA].base = CFI_CFA;
state->regs[CFI_RA].offset = -8;
}
const char *arch_nop_insn(int len)
{
static const char nops[5][5] = {
{ BYTES_NOP1 },
{ BYTES_NOP2 },
{ BYTES_NOP3 },
{ BYTES_NOP4 },
{ BYTES_NOP5 },
};
if (len < 1 || len > 5) {
WARN("invalid NOP size: %d\n", len);
return NULL;
}
return nops[len-1];
}
#define BYTE_RET 0xC3
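/*
 * Padded RET sequences: a RET, an INT3 to stop straight-line speculation
 * past the return, then NOP filler up to @len.
 */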
const char *arch_ret_insn(int len)
{
static const char ret[5][5] = {
{ BYTE_RET },
{ BYTE_RET, 0xcc },
{ BYTE_RET, 0xcc, BYTES_NOP1 },
{ BYTE_RET, 0xcc, BYTES_NOP2 },
{ BYTE_RET, 0xcc, BYTES_NOP3 },
};
if (len < 1 || len > 5) {
WARN("invalid RET size: %d\n", len);
return NULL;
}
return ret[len-1];
}
int arch_decode_hint_reg(u8 sp_reg, int *base)
{
switch (sp_reg) {
case ORC_REG_UNDEFINED:
*base = CFI_UNDEFINED;
break;
case ORC_REG_SP:
*base = CFI_SP;
break;
case ORC_REG_BP:
*base = CFI_BP;
break;
case ORC_REG_SP_INDIRECT:
*base = CFI_SP_INDIRECT;
break;
case ORC_REG_R10:
*base = CFI_R10;
break;
case ORC_REG_R13:
*base = CFI_R13;
break;
case ORC_REG_DI:
*base = CFI_DI;
break;
case ORC_REG_DX:
*base = CFI_DX;
break;
default:
return -1;
}
return 0;
}
bool arch_is_retpoline(struct symbol *sym)
{
return !strncmp(sym->name, "__x86_indirect_", 15);
}
bool arch_is_rethunk(struct symbol *sym)
{
return !strcmp(sym->name, "__x86_return_thunk");
}
bool arch_is_embedded_insn(struct symbol *sym)
{
return !strcmp(sym->name, "retbleed_return_thunk") ||
!strcmp(sym->name, "srso_safe_ret");
}
| linux-master | tools/objtool/arch/x86/decode.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* 32bit Socket syscall emulation. Based on arch/sparc64/kernel/sys_sparc32.c.
*
* Copyright (C) 2000 VA Linux Co
* Copyright (C) 2000 Don Dugger <[email protected]>
* Copyright (C) 1999 Arun Sharma <[email protected]>
* Copyright (C) 1997,1998 Jakub Jelinek ([email protected])
* Copyright (C) 1997 David S. Miller ([email protected])
* Copyright (C) 2000 Hewlett-Packard Co.
* Copyright (C) 2000 David Mosberger-Tang <[email protected]>
* Copyright (C) 2000,2001 Andi Kleen, SuSE Labs
*/
#include <linux/kernel.h>
#include <linux/gfp.h>
#include <linux/fs.h>
#include <linux/types.h>
#include <linux/file.h>
#include <linux/icmpv6.h>
#include <linux/socket.h>
#include <linux/syscalls.h>
#include <linux/filter.h>
#include <linux/compat.h>
#include <linux/security.h>
#include <linux/audit.h>
#include <linux/export.h>
#include <net/scm.h>
#include <net/sock.h>
#include <net/ip.h>
#include <net/ipv6.h>
#include <linux/uaccess.h>
#include <net/compat.h>
int __get_compat_msghdr(struct msghdr *kmsg,
struct compat_msghdr *msg,
struct sockaddr __user **save_addr)
{
ssize_t err;
kmsg->msg_flags = msg->msg_flags;
kmsg->msg_namelen = msg->msg_namelen;
if (!msg->msg_name)
kmsg->msg_namelen = 0;
if (kmsg->msg_namelen < 0)
return -EINVAL;
if (kmsg->msg_namelen > sizeof(struct sockaddr_storage))
kmsg->msg_namelen = sizeof(struct sockaddr_storage);
kmsg->msg_control_is_user = true;
kmsg->msg_get_inq = 0;
kmsg->msg_control_user = compat_ptr(msg->msg_control);
kmsg->msg_controllen = msg->msg_controllen;
if (save_addr)
*save_addr = compat_ptr(msg->msg_name);
if (msg->msg_name && kmsg->msg_namelen) {
if (!save_addr) {
err = move_addr_to_kernel(compat_ptr(msg->msg_name),
kmsg->msg_namelen,
kmsg->msg_name);
if (err < 0)
return err;
}
} else {
kmsg->msg_name = NULL;
kmsg->msg_namelen = 0;
}
if (msg->msg_iovlen > UIO_MAXIOV)
return -EMSGSIZE;
kmsg->msg_iocb = NULL;
kmsg->msg_ubuf = NULL;
return 0;
}
int get_compat_msghdr(struct msghdr *kmsg,
struct compat_msghdr __user *umsg,
struct sockaddr __user **save_addr,
struct iovec **iov)
{
struct compat_msghdr msg;
ssize_t err;
if (copy_from_user(&msg, umsg, sizeof(*umsg)))
return -EFAULT;
err = __get_compat_msghdr(kmsg, &msg, save_addr);
if (err)
return err;
err = import_iovec(save_addr ? ITER_DEST : ITER_SOURCE,
compat_ptr(msg.msg_iov), msg.msg_iovlen,
UIO_FASTIOV, iov, &kmsg->msg_iter);
return err < 0 ? err : 0;
}
/* Bleech... */
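/*
 * Compat flavours of the CMSG_* helpers: 32-bit userspace aligns cmsg
 * headers and payloads to 4 bytes (sizeof(s32)) instead of the native
 * 8 bytes, so the kernel's own macros can't be reused directly.
 */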
#define CMSG_COMPAT_ALIGN(len) ALIGN((len), sizeof(s32))
#define CMSG_COMPAT_DATA(cmsg) \
((void __user *)((char __user *)(cmsg) + sizeof(struct compat_cmsghdr)))
#define CMSG_COMPAT_SPACE(len) \
(sizeof(struct compat_cmsghdr) + CMSG_COMPAT_ALIGN(len))
#define CMSG_COMPAT_LEN(len) \
(sizeof(struct compat_cmsghdr) + (len))
#define CMSG_COMPAT_FIRSTHDR(msg) \
(((msg)->msg_controllen) >= sizeof(struct compat_cmsghdr) ? \
(struct compat_cmsghdr __user *)((msg)->msg_control_user) : \
(struct compat_cmsghdr __user *)NULL)
#define CMSG_COMPAT_OK(ucmlen, ucmsg, mhdr) \
((ucmlen) >= sizeof(struct compat_cmsghdr) && \
(ucmlen) <= (unsigned long) \
((mhdr)->msg_controllen - \
((char __user *)(ucmsg) - (char __user *)(mhdr)->msg_control_user)))
static inline struct compat_cmsghdr __user *cmsg_compat_nxthdr(struct msghdr *msg,
struct compat_cmsghdr __user *cmsg, int cmsg_len)
{
char __user *ptr = (char __user *)cmsg + CMSG_COMPAT_ALIGN(cmsg_len);
if ((unsigned long)(ptr + 1 - (char __user *)msg->msg_control_user) >
msg->msg_controllen)
return NULL;
return (struct compat_cmsghdr __user *)ptr;
}
/* There is a lot of hair here because the alignment rules (and
* thus placement) of cmsg headers and length are different for
* 32-bit apps. -DaveM
*/
int cmsghdr_from_user_compat_to_kern(struct msghdr *kmsg, struct sock *sk,
unsigned char *stackbuf, int stackbuf_size)
{
struct compat_cmsghdr __user *ucmsg;
struct cmsghdr *kcmsg, *kcmsg_base;
compat_size_t ucmlen;
__kernel_size_t kcmlen, tmp;
int err = -EFAULT;
BUILD_BUG_ON(sizeof(struct compat_cmsghdr) !=
CMSG_COMPAT_ALIGN(sizeof(struct compat_cmsghdr)));
kcmlen = 0;
kcmsg_base = kcmsg = (struct cmsghdr *)stackbuf;
ucmsg = CMSG_COMPAT_FIRSTHDR(kmsg);
while (ucmsg != NULL) {
if (get_user(ucmlen, &ucmsg->cmsg_len))
return -EFAULT;
/* Catch bogons. */
if (!CMSG_COMPAT_OK(ucmlen, ucmsg, kmsg))
return -EINVAL;
tmp = ((ucmlen - sizeof(*ucmsg)) + sizeof(struct cmsghdr));
tmp = CMSG_ALIGN(tmp);
kcmlen += tmp;
ucmsg = cmsg_compat_nxthdr(kmsg, ucmsg, ucmlen);
}
if (kcmlen == 0)
return -EINVAL;
/* The kcmlen holds the 64-bit version of the control length.
* It may not be modified as we do not stick it into the kmsg
* until we have successfully copied over all of the data
* from the user.
*/
if (kcmlen > stackbuf_size)
kcmsg_base = kcmsg = sock_kmalloc(sk, kcmlen, GFP_KERNEL);
if (kcmsg == NULL)
return -ENOMEM;
/* Now copy them over neatly. */
memset(kcmsg, 0, kcmlen);
ucmsg = CMSG_COMPAT_FIRSTHDR(kmsg);
while (ucmsg != NULL) {
struct compat_cmsghdr cmsg;
if (copy_from_user(&cmsg, ucmsg, sizeof(cmsg)))
goto Efault;
if (!CMSG_COMPAT_OK(cmsg.cmsg_len, ucmsg, kmsg))
goto Einval;
tmp = ((cmsg.cmsg_len - sizeof(*ucmsg)) + sizeof(struct cmsghdr));
if ((char *)kcmsg_base + kcmlen - (char *)kcmsg < CMSG_ALIGN(tmp))
goto Einval;
kcmsg->cmsg_len = tmp;
kcmsg->cmsg_level = cmsg.cmsg_level;
kcmsg->cmsg_type = cmsg.cmsg_type;
tmp = CMSG_ALIGN(tmp);
if (copy_from_user(CMSG_DATA(kcmsg),
CMSG_COMPAT_DATA(ucmsg),
(cmsg.cmsg_len - sizeof(*ucmsg))))
goto Efault;
/* Advance. */
kcmsg = (struct cmsghdr *)((char *)kcmsg + tmp);
ucmsg = cmsg_compat_nxthdr(kmsg, ucmsg, cmsg.cmsg_len);
}
	/*
	 * Check that the length of the messages copied in matches what we
	 * computed in the first loop.
	 */
if ((char *)kcmsg - (char *)kcmsg_base != kcmlen)
goto Einval;
/* Ok, looks like we made it. Hook it up and return success. */
kmsg->msg_control_is_user = false;
kmsg->msg_control = kcmsg_base;
kmsg->msg_controllen = kcmlen;
return 0;
Einval:
err = -EINVAL;
Efault:
if (kcmsg_base != (struct cmsghdr *)stackbuf)
sock_kfree_s(sk, kcmsg_base, kcmlen);
return err;
}
int put_cmsg_compat(struct msghdr *kmsg, int level, int type, int len, void *data)
{
struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __user *) kmsg->msg_control_user;
struct compat_cmsghdr cmhdr;
struct old_timeval32 ctv;
struct old_timespec32 cts[3];
int cmlen;
if (cm == NULL || kmsg->msg_controllen < sizeof(*cm)) {
kmsg->msg_flags |= MSG_CTRUNC;
return 0; /* XXX: return error? check spec. */
}
if (!COMPAT_USE_64BIT_TIME) {
if (level == SOL_SOCKET && type == SO_TIMESTAMP_OLD) {
struct __kernel_old_timeval *tv = (struct __kernel_old_timeval *)data;
ctv.tv_sec = tv->tv_sec;
ctv.tv_usec = tv->tv_usec;
data = &ctv;
len = sizeof(ctv);
}
if (level == SOL_SOCKET &&
(type == SO_TIMESTAMPNS_OLD || type == SO_TIMESTAMPING_OLD)) {
int count = type == SO_TIMESTAMPNS_OLD ? 1 : 3;
int i;
struct __kernel_old_timespec *ts = data;
for (i = 0; i < count; i++) {
cts[i].tv_sec = ts[i].tv_sec;
cts[i].tv_nsec = ts[i].tv_nsec;
}
data = &cts;
len = sizeof(cts[0]) * count;
}
}
cmlen = CMSG_COMPAT_LEN(len);
if (kmsg->msg_controllen < cmlen) {
kmsg->msg_flags |= MSG_CTRUNC;
cmlen = kmsg->msg_controllen;
}
cmhdr.cmsg_level = level;
cmhdr.cmsg_type = type;
cmhdr.cmsg_len = cmlen;
	if (copy_to_user(cm, &cmhdr, sizeof(cmhdr)))
return -EFAULT;
if (copy_to_user(CMSG_COMPAT_DATA(cm), data, cmlen - sizeof(struct compat_cmsghdr)))
return -EFAULT;
cmlen = CMSG_COMPAT_SPACE(len);
if (kmsg->msg_controllen < cmlen)
cmlen = kmsg->msg_controllen;
kmsg->msg_control_user += cmlen;
kmsg->msg_controllen -= cmlen;
return 0;
}
static int scm_max_fds_compat(struct msghdr *msg)
{
if (msg->msg_controllen <= sizeof(struct compat_cmsghdr))
return 0;
return (msg->msg_controllen - sizeof(struct compat_cmsghdr)) / sizeof(int);
}
void scm_detach_fds_compat(struct msghdr *msg, struct scm_cookie *scm)
{
struct compat_cmsghdr __user *cm =
(struct compat_cmsghdr __user *)msg->msg_control_user;
unsigned int o_flags = (msg->msg_flags & MSG_CMSG_CLOEXEC) ? O_CLOEXEC : 0;
int fdmax = min_t(int, scm_max_fds_compat(msg), scm->fp->count);
int __user *cmsg_data = CMSG_COMPAT_DATA(cm);
int err = 0, i;
for (i = 0; i < fdmax; i++) {
err = receive_fd_user(scm->fp->fp[i], cmsg_data + i, o_flags);
if (err < 0)
break;
}
if (i > 0) {
int cmlen = CMSG_COMPAT_LEN(i * sizeof(int));
err = put_user(SOL_SOCKET, &cm->cmsg_level);
if (!err)
err = put_user(SCM_RIGHTS, &cm->cmsg_type);
if (!err)
err = put_user(cmlen, &cm->cmsg_len);
if (!err) {
cmlen = CMSG_COMPAT_SPACE(i * sizeof(int));
if (msg->msg_controllen < cmlen)
cmlen = msg->msg_controllen;
msg->msg_control_user += cmlen;
msg->msg_controllen -= cmlen;
}
}
if (i < scm->fp->count || (scm->fp->count && fdmax <= 0))
msg->msg_flags |= MSG_CTRUNC;
/*
* All of the files that fit in the message have had their usage counts
* incremented, so we just free the list.
*/
__scm_destroy(scm);
}
/* Argument list sizes for compat_sys_socketcall */
#define AL(x) ((x) * sizeof(u32))
static unsigned char nas[21] = {
AL(0), AL(3), AL(3), AL(3), AL(2), AL(3),
AL(3), AL(3), AL(4), AL(4), AL(4), AL(6),
AL(6), AL(2), AL(5), AL(5), AL(3), AL(3),
AL(4), AL(5), AL(4)
};
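/* e.g. nas[SYS_BIND] == AL(3), since bind() takes three u32 arguments. */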
#undef AL
static inline long __compat_sys_sendmsg(int fd,
struct compat_msghdr __user *msg,
unsigned int flags)
{
return __sys_sendmsg(fd, (struct user_msghdr __user *)msg,
flags | MSG_CMSG_COMPAT, false);
}
COMPAT_SYSCALL_DEFINE3(sendmsg, int, fd, struct compat_msghdr __user *, msg,
unsigned int, flags)
{
return __compat_sys_sendmsg(fd, msg, flags);
}
static inline long __compat_sys_sendmmsg(int fd,
struct compat_mmsghdr __user *mmsg,
unsigned int vlen, unsigned int flags)
{
return __sys_sendmmsg(fd, (struct mmsghdr __user *)mmsg, vlen,
flags | MSG_CMSG_COMPAT, false);
}
COMPAT_SYSCALL_DEFINE4(sendmmsg, int, fd, struct compat_mmsghdr __user *, mmsg,
unsigned int, vlen, unsigned int, flags)
{
return __compat_sys_sendmmsg(fd, mmsg, vlen, flags);
}
static inline long __compat_sys_recvmsg(int fd,
struct compat_msghdr __user *msg,
unsigned int flags)
{
return __sys_recvmsg(fd, (struct user_msghdr __user *)msg,
flags | MSG_CMSG_COMPAT, false);
}
COMPAT_SYSCALL_DEFINE3(recvmsg, int, fd, struct compat_msghdr __user *, msg,
unsigned int, flags)
{
return __compat_sys_recvmsg(fd, msg, flags);
}
static inline long __compat_sys_recvfrom(int fd, void __user *buf,
compat_size_t len, unsigned int flags,
struct sockaddr __user *addr,
int __user *addrlen)
{
return __sys_recvfrom(fd, buf, len, flags | MSG_CMSG_COMPAT, addr,
addrlen);
}
COMPAT_SYSCALL_DEFINE4(recv, int, fd, void __user *, buf, compat_size_t, len, unsigned int, flags)
{
return __compat_sys_recvfrom(fd, buf, len, flags, NULL, NULL);
}
COMPAT_SYSCALL_DEFINE6(recvfrom, int, fd, void __user *, buf, compat_size_t, len,
unsigned int, flags, struct sockaddr __user *, addr,
int __user *, addrlen)
{
return __compat_sys_recvfrom(fd, buf, len, flags, addr, addrlen);
}
COMPAT_SYSCALL_DEFINE5(recvmmsg_time64, int, fd, struct compat_mmsghdr __user *, mmsg,
unsigned int, vlen, unsigned int, flags,
struct __kernel_timespec __user *, timeout)
{
return __sys_recvmmsg(fd, (struct mmsghdr __user *)mmsg, vlen,
flags | MSG_CMSG_COMPAT, timeout, NULL);
}
#ifdef CONFIG_COMPAT_32BIT_TIME
COMPAT_SYSCALL_DEFINE5(recvmmsg_time32, int, fd, struct compat_mmsghdr __user *, mmsg,
unsigned int, vlen, unsigned int, flags,
struct old_timespec32 __user *, timeout)
{
return __sys_recvmmsg(fd, (struct mmsghdr __user *)mmsg, vlen,
flags | MSG_CMSG_COMPAT, NULL, timeout);
}
#endif
COMPAT_SYSCALL_DEFINE2(socketcall, int, call, u32 __user *, args)
{
u32 a[AUDITSC_ARGS];
unsigned int len;
u32 a0, a1;
int ret;
if (call < SYS_SOCKET || call > SYS_SENDMMSG)
return -EINVAL;
len = nas[call];
if (len > sizeof(a))
return -EINVAL;
if (copy_from_user(a, args, len))
return -EFAULT;
ret = audit_socketcall_compat(len / sizeof(a[0]), a);
if (ret)
return ret;
a0 = a[0];
a1 = a[1];
switch (call) {
case SYS_SOCKET:
ret = __sys_socket(a0, a1, a[2]);
break;
case SYS_BIND:
ret = __sys_bind(a0, compat_ptr(a1), a[2]);
break;
case SYS_CONNECT:
ret = __sys_connect(a0, compat_ptr(a1), a[2]);
break;
case SYS_LISTEN:
ret = __sys_listen(a0, a1);
break;
case SYS_ACCEPT:
ret = __sys_accept4(a0, compat_ptr(a1), compat_ptr(a[2]), 0);
break;
case SYS_GETSOCKNAME:
ret = __sys_getsockname(a0, compat_ptr(a1), compat_ptr(a[2]));
break;
case SYS_GETPEERNAME:
ret = __sys_getpeername(a0, compat_ptr(a1), compat_ptr(a[2]));
break;
case SYS_SOCKETPAIR:
ret = __sys_socketpair(a0, a1, a[2], compat_ptr(a[3]));
break;
case SYS_SEND:
ret = __sys_sendto(a0, compat_ptr(a1), a[2], a[3], NULL, 0);
break;
case SYS_SENDTO:
ret = __sys_sendto(a0, compat_ptr(a1), a[2], a[3],
compat_ptr(a[4]), a[5]);
break;
case SYS_RECV:
ret = __compat_sys_recvfrom(a0, compat_ptr(a1), a[2], a[3],
NULL, NULL);
break;
case SYS_RECVFROM:
ret = __compat_sys_recvfrom(a0, compat_ptr(a1), a[2], a[3],
compat_ptr(a[4]),
compat_ptr(a[5]));
break;
case SYS_SHUTDOWN:
ret = __sys_shutdown(a0, a1);
break;
case SYS_SETSOCKOPT:
ret = __sys_setsockopt(a0, a1, a[2], compat_ptr(a[3]), a[4]);
break;
case SYS_GETSOCKOPT:
ret = __sys_getsockopt(a0, a1, a[2], compat_ptr(a[3]),
compat_ptr(a[4]));
break;
case SYS_SENDMSG:
ret = __compat_sys_sendmsg(a0, compat_ptr(a1), a[2]);
break;
case SYS_SENDMMSG:
ret = __compat_sys_sendmmsg(a0, compat_ptr(a1), a[2], a[3]);
break;
case SYS_RECVMSG:
ret = __compat_sys_recvmsg(a0, compat_ptr(a1), a[2]);
break;
case SYS_RECVMMSG:
ret = __sys_recvmmsg(a0, compat_ptr(a1), a[2],
a[3] | MSG_CMSG_COMPAT, NULL,
compat_ptr(a[4]));
break;
case SYS_ACCEPT4:
ret = __sys_accept4(a0, compat_ptr(a1), compat_ptr(a[2]), a[3]);
break;
default:
ret = -EINVAL;
break;
}
return ret;
}
| linux-master | net/compat.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* NET An implementation of the SOCKET network access protocol.
*
* Version: @(#)socket.c 1.1.93 18/02/95
*
* Authors: Orest Zborowski, <[email protected]>
* Ross Biro
* Fred N. van Kempen, <[email protected]>
*
* Fixes:
* Anonymous : NOTSOCK/BADF cleanup. Error fix in
* shutdown()
* Alan Cox : verify_area() fixes
* Alan Cox : Removed DDI
* Jonathan Kamens : SOCK_DGRAM reconnect bug
* Alan Cox : Moved a load of checks to the very
* top level.
* Alan Cox : Move address structures to/from user
* mode above the protocol layers.
* Rob Janssen : Allow 0 length sends.
* Alan Cox : Asynchronous I/O support (cribbed from the
* tty drivers).
* Niibe Yutaka : Asynchronous I/O for writes (4.4BSD style)
* Jeff Uphoff : Made max number of sockets command-line
* configurable.
* Matti Aarnio : Made the number of sockets dynamic,
* to be allocated when needed, and mr.
* Uphoff's max is used as max to be
* allowed to allocate.
* Linus : Argh. removed all the socket allocation
* altogether: it's in the inode now.
* Alan Cox : Made sock_alloc()/sock_release() public
* for NetROM and future kernel nfsd type
* stuff.
* Alan Cox : sendmsg/recvmsg basics.
* Tom Dyas : Export net symbols.
* Marcin Dalecki : Fixed problems with CONFIG_NET="n".
* Alan Cox : Added thread locking to sys_* calls
* for sockets. May have errors at the
* moment.
* Kevin Buhr : Fixed the dumb errors in the above.
* Andi Kleen : Some small cleanups, optimizations,
* and fixed a copy_from_user() bug.
* Tigran Aivazian : sys_send(args) calls sys_sendto(args, NULL, 0)
* Tigran Aivazian : Made listen(2) backlog sanity checks
* protocol-independent
*
* This module is effectively the top level interface to the BSD socket
* paradigm.
*
* Based upon Swansea University Computer Society NET3.039
*/
#include <linux/bpf-cgroup.h>
#include <linux/ethtool.h>
#include <linux/mm.h>
#include <linux/socket.h>
#include <linux/file.h>
#include <linux/splice.h>
#include <linux/net.h>
#include <linux/interrupt.h>
#include <linux/thread_info.h>
#include <linux/rcupdate.h>
#include <linux/netdevice.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/mutex.h>
#include <linux/if_bridge.h>
#include <linux/if_vlan.h>
#include <linux/ptp_classify.h>
#include <linux/init.h>
#include <linux/poll.h>
#include <linux/cache.h>
#include <linux/module.h>
#include <linux/highmem.h>
#include <linux/mount.h>
#include <linux/pseudo_fs.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/compat.h>
#include <linux/kmod.h>
#include <linux/audit.h>
#include <linux/wireless.h>
#include <linux/nsproxy.h>
#include <linux/magic.h>
#include <linux/slab.h>
#include <linux/xattr.h>
#include <linux/nospec.h>
#include <linux/indirect_call_wrapper.h>
#include <linux/io_uring.h>
#include <linux/uaccess.h>
#include <asm/unistd.h>
#include <net/compat.h>
#include <net/wext.h>
#include <net/cls_cgroup.h>
#include <net/sock.h>
#include <linux/netfilter.h>
#include <linux/if_tun.h>
#include <linux/ipv6_route.h>
#include <linux/route.h>
#include <linux/termios.h>
#include <linux/sockios.h>
#include <net/busy_poll.h>
#include <linux/errqueue.h>
#include <linux/ptp_clock_kernel.h>
#include <trace/events/sock.h>
#ifdef CONFIG_NET_RX_BUSY_POLL
unsigned int sysctl_net_busy_read __read_mostly;
unsigned int sysctl_net_busy_poll __read_mostly;
#endif
static ssize_t sock_read_iter(struct kiocb *iocb, struct iov_iter *to);
static ssize_t sock_write_iter(struct kiocb *iocb, struct iov_iter *from);
static int sock_mmap(struct file *file, struct vm_area_struct *vma);
static int sock_close(struct inode *inode, struct file *file);
static __poll_t sock_poll(struct file *file,
struct poll_table_struct *wait);
static long sock_ioctl(struct file *file, unsigned int cmd, unsigned long arg);
#ifdef CONFIG_COMPAT
static long compat_sock_ioctl(struct file *file,
unsigned int cmd, unsigned long arg);
#endif
static int sock_fasync(int fd, struct file *filp, int on);
static ssize_t sock_splice_read(struct file *file, loff_t *ppos,
struct pipe_inode_info *pipe, size_t len,
unsigned int flags);
static void sock_splice_eof(struct file *file);
#ifdef CONFIG_PROC_FS
static void sock_show_fdinfo(struct seq_file *m, struct file *f)
{
struct socket *sock = f->private_data;
const struct proto_ops *ops = READ_ONCE(sock->ops);
if (ops->show_fdinfo)
ops->show_fdinfo(m, sock);
}
#else
#define sock_show_fdinfo NULL
#endif
/*
 *	Socket files have a set of 'special' operations as well as the
 *	generic file ones. These don't appear in the operation structures
 *	but are done directly via the socketcall() multiplexor.
 */
static const struct file_operations socket_file_ops = {
.owner = THIS_MODULE,
.llseek = no_llseek,
.read_iter = sock_read_iter,
.write_iter = sock_write_iter,
.poll = sock_poll,
.unlocked_ioctl = sock_ioctl,
#ifdef CONFIG_COMPAT
.compat_ioctl = compat_sock_ioctl,
#endif
.uring_cmd = io_uring_cmd_sock,
.mmap = sock_mmap,
.release = sock_close,
.fasync = sock_fasync,
.splice_write = splice_to_socket,
.splice_read = sock_splice_read,
.splice_eof = sock_splice_eof,
.show_fdinfo = sock_show_fdinfo,
};
static const char * const pf_family_names[] = {
[PF_UNSPEC] = "PF_UNSPEC",
[PF_UNIX] = "PF_UNIX/PF_LOCAL",
[PF_INET] = "PF_INET",
[PF_AX25] = "PF_AX25",
[PF_IPX] = "PF_IPX",
[PF_APPLETALK] = "PF_APPLETALK",
[PF_NETROM] = "PF_NETROM",
[PF_BRIDGE] = "PF_BRIDGE",
[PF_ATMPVC] = "PF_ATMPVC",
[PF_X25] = "PF_X25",
[PF_INET6] = "PF_INET6",
[PF_ROSE] = "PF_ROSE",
[PF_DECnet] = "PF_DECnet",
[PF_NETBEUI] = "PF_NETBEUI",
[PF_SECURITY] = "PF_SECURITY",
[PF_KEY] = "PF_KEY",
[PF_NETLINK] = "PF_NETLINK/PF_ROUTE",
[PF_PACKET] = "PF_PACKET",
[PF_ASH] = "PF_ASH",
[PF_ECONET] = "PF_ECONET",
[PF_ATMSVC] = "PF_ATMSVC",
[PF_RDS] = "PF_RDS",
[PF_SNA] = "PF_SNA",
[PF_IRDA] = "PF_IRDA",
[PF_PPPOX] = "PF_PPPOX",
[PF_WANPIPE] = "PF_WANPIPE",
[PF_LLC] = "PF_LLC",
[PF_IB] = "PF_IB",
[PF_MPLS] = "PF_MPLS",
[PF_CAN] = "PF_CAN",
[PF_TIPC] = "PF_TIPC",
[PF_BLUETOOTH] = "PF_BLUETOOTH",
[PF_IUCV] = "PF_IUCV",
[PF_RXRPC] = "PF_RXRPC",
[PF_ISDN] = "PF_ISDN",
[PF_PHONET] = "PF_PHONET",
[PF_IEEE802154] = "PF_IEEE802154",
[PF_CAIF] = "PF_CAIF",
[PF_ALG] = "PF_ALG",
[PF_NFC] = "PF_NFC",
[PF_VSOCK] = "PF_VSOCK",
[PF_KCM] = "PF_KCM",
[PF_QIPCRTR] = "PF_QIPCRTR",
[PF_SMC] = "PF_SMC",
[PF_XDP] = "PF_XDP",
[PF_MCTP] = "PF_MCTP",
};
/*
* The protocol list. Each protocol is registered in here.
*/
static DEFINE_SPINLOCK(net_family_lock);
static const struct net_proto_family __rcu *net_families[NPROTO] __read_mostly;
/*
* Support routines.
* Move socket addresses back and forth across the kernel/user
* divide and look after the messy bits.
*/
/**
* move_addr_to_kernel - copy a socket address into kernel space
* @uaddr: Address in user space
* @kaddr: Address in kernel space
* @ulen: Length in user space
*
* The address is copied into kernel space. If the provided address is
* too long an error code of -EINVAL is returned. If the copy gives
 *	invalid addresses -EFAULT is returned. On success 0 is returned.
*/
int move_addr_to_kernel(void __user *uaddr, int ulen, struct sockaddr_storage *kaddr)
{
if (ulen < 0 || ulen > sizeof(struct sockaddr_storage))
return -EINVAL;
if (ulen == 0)
return 0;
if (copy_from_user(kaddr, uaddr, ulen))
return -EFAULT;
return audit_sockaddr(ulen, kaddr);
}
/**
* move_addr_to_user - copy an address to user space
* @kaddr: kernel space address
* @klen: length of address in kernel
* @uaddr: user space address
* @ulen: pointer to user length field
*
* The value pointed to by ulen on entry is the buffer length available.
* This is overwritten with the buffer space used. -EINVAL is returned
 *	if an overlong or negative buffer size is specified. -EFAULT
 *	is returned if either the buffer or the length field is not
 *	accessible.
 *	After copying the data up to the limit the user specified, the true
 *	length of the data is written over the length limit the user
 *	supplied. Zero is returned on success.
*/
static int move_addr_to_user(struct sockaddr_storage *kaddr, int klen,
void __user *uaddr, int __user *ulen)
{
int err;
int len;
BUG_ON(klen > sizeof(struct sockaddr_storage));
err = get_user(len, ulen);
if (err)
return err;
if (len > klen)
len = klen;
if (len < 0)
return -EINVAL;
if (len) {
if (audit_sockaddr(klen, kaddr))
return -ENOMEM;
if (copy_to_user(uaddr, kaddr, len))
return -EFAULT;
}
/*
* "fromlen shall refer to the value before truncation.."
* 1003.1g
*/
return __put_user(klen, ulen);
}
static struct kmem_cache *sock_inode_cachep __ro_after_init;
static struct inode *sock_alloc_inode(struct super_block *sb)
{
struct socket_alloc *ei;
ei = alloc_inode_sb(sb, sock_inode_cachep, GFP_KERNEL);
if (!ei)
return NULL;
init_waitqueue_head(&ei->socket.wq.wait);
ei->socket.wq.fasync_list = NULL;
ei->socket.wq.flags = 0;
ei->socket.state = SS_UNCONNECTED;
ei->socket.flags = 0;
ei->socket.ops = NULL;
ei->socket.sk = NULL;
ei->socket.file = NULL;
return &ei->vfs_inode;
}
static void sock_free_inode(struct inode *inode)
{
struct socket_alloc *ei;
ei = container_of(inode, struct socket_alloc, vfs_inode);
kmem_cache_free(sock_inode_cachep, ei);
}
static void init_once(void *foo)
{
struct socket_alloc *ei = (struct socket_alloc *)foo;
inode_init_once(&ei->vfs_inode);
}
static void init_inodecache(void)
{
sock_inode_cachep = kmem_cache_create("sock_inode_cache",
sizeof(struct socket_alloc),
0,
(SLAB_HWCACHE_ALIGN |
SLAB_RECLAIM_ACCOUNT |
SLAB_MEM_SPREAD | SLAB_ACCOUNT),
init_once);
BUG_ON(sock_inode_cachep == NULL);
}
static const struct super_operations sockfs_ops = {
.alloc_inode = sock_alloc_inode,
.free_inode = sock_free_inode,
.statfs = simple_statfs,
};
/*
* sockfs_dname() is called from d_path().
*/
static char *sockfs_dname(struct dentry *dentry, char *buffer, int buflen)
{
return dynamic_dname(buffer, buflen, "socket:[%lu]",
d_inode(dentry)->i_ino);
}
static const struct dentry_operations sockfs_dentry_operations = {
.d_dname = sockfs_dname,
};
static int sockfs_xattr_get(const struct xattr_handler *handler,
struct dentry *dentry, struct inode *inode,
const char *suffix, void *value, size_t size)
{
if (value) {
if (dentry->d_name.len + 1 > size)
return -ERANGE;
memcpy(value, dentry->d_name.name, dentry->d_name.len + 1);
}
return dentry->d_name.len + 1;
}
#define XATTR_SOCKPROTONAME_SUFFIX "sockprotoname"
#define XATTR_NAME_SOCKPROTONAME (XATTR_SYSTEM_PREFIX XATTR_SOCKPROTONAME_SUFFIX)
#define XATTR_NAME_SOCKPROTONAME_LEN (sizeof(XATTR_NAME_SOCKPROTONAME)-1)
static const struct xattr_handler sockfs_xattr_handler = {
.name = XATTR_NAME_SOCKPROTONAME,
.get = sockfs_xattr_get,
};
static int sockfs_security_xattr_set(const struct xattr_handler *handler,
struct mnt_idmap *idmap,
struct dentry *dentry, struct inode *inode,
const char *suffix, const void *value,
size_t size, int flags)
{
/* Handled by LSM. */
return -EAGAIN;
}
static const struct xattr_handler sockfs_security_xattr_handler = {
.prefix = XATTR_SECURITY_PREFIX,
.set = sockfs_security_xattr_set,
};
static const struct xattr_handler *sockfs_xattr_handlers[] = {
&sockfs_xattr_handler,
&sockfs_security_xattr_handler,
NULL
};
static int sockfs_init_fs_context(struct fs_context *fc)
{
struct pseudo_fs_context *ctx = init_pseudo(fc, SOCKFS_MAGIC);
if (!ctx)
return -ENOMEM;
ctx->ops = &sockfs_ops;
ctx->dops = &sockfs_dentry_operations;
ctx->xattr = sockfs_xattr_handlers;
return 0;
}
static struct vfsmount *sock_mnt __read_mostly;
static struct file_system_type sock_fs_type = {
.name = "sockfs",
.init_fs_context = sockfs_init_fs_context,
.kill_sb = kill_anon_super,
};
/*
 *	Obtains the first available file descriptor and sets it up for use.
 *
 *	These functions create file structures and map them to the fd space
 *	of the current process. On success they return the file descriptor,
 *	with the file struct implicitly stored in sock->file.
 *	Note that another thread may close the file descriptor before we
 *	return from this function. We rely on the fact that we do not refer
 *	to the socket after mapping. If that ever changes, this function
 *	will need to take an extra reference on the file.
 *
 *	In any case the returned fd may already be invalid!
 *	This race condition is unavoidable with shared fd spaces; we cannot
 *	solve it inside the kernel, but we do keep the internal state
 *	coherent.
 */
/**
* sock_alloc_file - Bind a &socket to a &file
* @sock: socket
* @flags: file status flags
* @dname: protocol name
*
* Returns the &file bound with @sock, implicitly storing it
 *	in sock->file. If @dname is %NULL, the protocol creator name of
 *	@sock->sk is used, falling back to "" when there is no sk.
*
* On failure @sock is released, and an ERR pointer is returned.
*
* This function uses GFP_KERNEL internally.
*/
struct file *sock_alloc_file(struct socket *sock, int flags, const char *dname)
{
struct file *file;
if (!dname)
dname = sock->sk ? sock->sk->sk_prot_creator->name : "";
file = alloc_file_pseudo(SOCK_INODE(sock), sock_mnt, dname,
O_RDWR | (flags & O_NONBLOCK),
&socket_file_ops);
if (IS_ERR(file)) {
sock_release(sock);
return file;
}
file->f_mode |= FMODE_NOWAIT;
sock->file = file;
file->private_data = sock;
stream_open(SOCK_INODE(sock), file);
return file;
}
EXPORT_SYMBOL(sock_alloc_file);
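/*
 * Illustrative sketch only (the helper below is hypothetical, not part
 * of this file): the subtle point when calling sock_alloc_file() is its
 * failure semantics - on error @sock has already been released, so the
 * caller must not touch or re-release it.
 */
static struct file * __maybe_unused example_wrap_sock_in_file(struct socket *sock)
{
	struct file *file = sock_alloc_file(sock, 0, NULL);

	if (IS_ERR(file))
		return file;	/* @sock is already gone at this point */

	/* file->private_data now points back at @sock */
	return file;
}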
static int sock_map_fd(struct socket *sock, int flags)
{
struct file *newfile;
int fd = get_unused_fd_flags(flags);
if (unlikely(fd < 0)) {
sock_release(sock);
return fd;
}
newfile = sock_alloc_file(sock, flags, NULL);
if (!IS_ERR(newfile)) {
fd_install(fd, newfile);
return fd;
}
put_unused_fd(fd);
return PTR_ERR(newfile);
}
/**
 * sock_from_file - Return the &socket bound to @file.
* @file: file
*
* On failure returns %NULL.
*/
struct socket *sock_from_file(struct file *file)
{
if (file->f_op == &socket_file_ops)
return file->private_data; /* set in sock_alloc_file */
return NULL;
}
EXPORT_SYMBOL(sock_from_file);
/**
* sockfd_lookup - Go from a file number to its socket slot
* @fd: file handle
* @err: pointer to an error code return
*
 *	The file handle passed in is pinned (a reference is taken on the
 *	underlying file) and the socket it is bound to is returned. If an
 *	error occurs the err pointer is overwritten
* with a negative errno code and NULL is returned. The function checks
* for both invalid handles and passing a handle which is not a socket.
*
* On a success the socket object pointer is returned.
*/
struct socket *sockfd_lookup(int fd, int *err)
{
struct file *file;
struct socket *sock;
file = fget(fd);
if (!file) {
*err = -EBADF;
return NULL;
}
sock = sock_from_file(file);
if (!sock) {
*err = -ENOTSOCK;
fput(file);
}
return sock;
}
EXPORT_SYMBOL(sockfd_lookup);
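/*
 * Illustrative sketch (hypothetical helper, not part of this file):
 * sockfd_lookup() takes a reference on the underlying file, so every
 * successful lookup must be balanced with sockfd_put(), the fput()
 * wrapper from <linux/net.h>.
 */
static int __maybe_unused example_sock_type_of_fd(int fd)
{
	int err;
	struct socket *sock = sockfd_lookup(fd, &err);

	if (!sock)
		return err;		/* -EBADF or -ENOTSOCK */

	err = sock->type;		/* e.g. SOCK_STREAM */
	sockfd_put(sock);		/* drop the file reference */
	return err;
}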
static struct socket *sockfd_lookup_light(int fd, int *err, int *fput_needed)
{
struct fd f = fdget(fd);
struct socket *sock;
*err = -EBADF;
if (f.file) {
sock = sock_from_file(f.file);
if (likely(sock)) {
*fput_needed = f.flags & FDPUT_FPUT;
return sock;
}
*err = -ENOTSOCK;
fdput(f);
}
return NULL;
}
static ssize_t sockfs_listxattr(struct dentry *dentry, char *buffer,
size_t size)
{
ssize_t len;
ssize_t used = 0;
len = security_inode_listsecurity(d_inode(dentry), buffer, size);
if (len < 0)
return len;
used += len;
if (buffer) {
if (size < used)
return -ERANGE;
buffer += len;
}
len = (XATTR_NAME_SOCKPROTONAME_LEN + 1);
used += len;
if (buffer) {
if (size < used)
return -ERANGE;
memcpy(buffer, XATTR_NAME_SOCKPROTONAME, len);
buffer += len;
}
return used;
}
static int sockfs_setattr(struct mnt_idmap *idmap,
struct dentry *dentry, struct iattr *iattr)
{
int err = simple_setattr(&nop_mnt_idmap, dentry, iattr);
if (!err && (iattr->ia_valid & ATTR_UID)) {
struct socket *sock = SOCKET_I(d_inode(dentry));
if (sock->sk)
sock->sk->sk_uid = iattr->ia_uid;
else
err = -ENOENT;
}
return err;
}
static const struct inode_operations sockfs_inode_ops = {
.listxattr = sockfs_listxattr,
.setattr = sockfs_setattr,
};
/**
* sock_alloc - allocate a socket
*
* Allocate a new inode and socket object. The two are bound together
* and initialised. The socket is then returned. If we are out of inodes
 * NULL is returned. This function uses GFP_KERNEL internally.
*/
struct socket *sock_alloc(void)
{
struct inode *inode;
struct socket *sock;
inode = new_inode_pseudo(sock_mnt->mnt_sb);
if (!inode)
return NULL;
sock = SOCKET_I(inode);
inode->i_ino = get_next_ino();
inode->i_mode = S_IFSOCK | S_IRWXUGO;
inode->i_uid = current_fsuid();
inode->i_gid = current_fsgid();
inode->i_op = &sockfs_inode_ops;
return sock;
}
EXPORT_SYMBOL(sock_alloc);
static void __sock_release(struct socket *sock, struct inode *inode)
{
const struct proto_ops *ops = READ_ONCE(sock->ops);
if (ops) {
struct module *owner = ops->owner;
if (inode)
inode_lock(inode);
ops->release(sock);
sock->sk = NULL;
if (inode)
inode_unlock(inode);
sock->ops = NULL;
module_put(owner);
}
if (sock->wq.fasync_list)
pr_err("%s: fasync list not empty!\n", __func__);
if (!sock->file) {
iput(SOCK_INODE(sock));
return;
}
sock->file = NULL;
}
/**
* sock_release - close a socket
* @sock: socket to close
*
* The socket is released from the protocol stack if it has a release
* callback, and the inode is then released if the socket is bound to
* an inode not a file.
*/
void sock_release(struct socket *sock)
{
__sock_release(sock, NULL);
}
EXPORT_SYMBOL(sock_release);
void __sock_tx_timestamp(__u16 tsflags, __u8 *tx_flags)
{
u8 flags = *tx_flags;
if (tsflags & SOF_TIMESTAMPING_TX_HARDWARE) {
flags |= SKBTX_HW_TSTAMP;
/* PTP hardware clocks can provide a free running cycle counter
* as a time base for virtual clocks. Tell driver to use the
* free running cycle counter for timestamp if socket is bound
* to virtual clock.
*/
if (tsflags & SOF_TIMESTAMPING_BIND_PHC)
flags |= SKBTX_HW_TSTAMP_USE_CYCLES;
}
if (tsflags & SOF_TIMESTAMPING_TX_SOFTWARE)
flags |= SKBTX_SW_TSTAMP;
if (tsflags & SOF_TIMESTAMPING_TX_SCHED)
flags |= SKBTX_SCHED_TSTAMP;
*tx_flags = flags;
}
EXPORT_SYMBOL(__sock_tx_timestamp);
INDIRECT_CALLABLE_DECLARE(int inet_sendmsg(struct socket *, struct msghdr *,
size_t));
INDIRECT_CALLABLE_DECLARE(int inet6_sendmsg(struct socket *, struct msghdr *,
size_t));
static noinline void call_trace_sock_send_length(struct sock *sk, int ret,
int flags)
{
trace_sock_send_length(sk, ret, 0);
}
static inline int sock_sendmsg_nosec(struct socket *sock, struct msghdr *msg)
{
int ret = INDIRECT_CALL_INET(READ_ONCE(sock->ops)->sendmsg, inet6_sendmsg,
inet_sendmsg, sock, msg,
msg_data_left(msg));
BUG_ON(ret == -EIOCBQUEUED);
if (trace_sock_send_length_enabled())
call_trace_sock_send_length(sock->sk, ret, 0);
return ret;
}
/**
* sock_sendmsg - send a message through @sock
* @sock: socket
* @msg: message to send
*
* Sends @msg through @sock, passing through LSM.
* Returns the number of bytes sent, or an error code.
*/
int sock_sendmsg(struct socket *sock, struct msghdr *msg)
{
int err = security_socket_sendmsg(sock, msg,
msg_data_left(msg));
return err ?: sock_sendmsg_nosec(sock, msg);
}
EXPORT_SYMBOL(sock_sendmsg);
/**
* kernel_sendmsg - send a message through @sock (kernel-space)
* @sock: socket
* @msg: message header
* @vec: kernel vec
* @num: vec array length
* @size: total message data size
*
* Builds the message data with @vec and sends it through @sock.
* Returns the number of bytes sent, or an error code.
*/
int kernel_sendmsg(struct socket *sock, struct msghdr *msg,
struct kvec *vec, size_t num, size_t size)
{
iov_iter_kvec(&msg->msg_iter, ITER_SOURCE, vec, num, size);
return sock_sendmsg(sock, msg);
}
EXPORT_SYMBOL(kernel_sendmsg);
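/*
 * Illustrative sketch (hypothetical helper): the classic in-kernel send
 * pattern. The buffer lives in kernel space, hence a kvec rather than a
 * user iovec; kernel_sendmsg() builds the iterator itself.
 */
static int __maybe_unused example_kernel_send(struct socket *sock,
					      void *buf, size_t len)
{
	struct kvec vec = { .iov_base = buf, .iov_len = len };
	struct msghdr msg = { .msg_flags = MSG_DONTWAIT };

	/* Returns the number of bytes sent or a negative errno. */
	return kernel_sendmsg(sock, &msg, &vec, 1, len);
}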
/**
* kernel_sendmsg_locked - send a message through @sock (kernel-space)
* @sk: sock
* @msg: message header
* @vec: output s/g array
* @num: output s/g array length
* @size: total message data size
*
* Builds the message data with @vec and sends it through @sock.
* Returns the number of bytes sent, or an error code.
* Caller must hold @sk.
*/
int kernel_sendmsg_locked(struct sock *sk, struct msghdr *msg,
struct kvec *vec, size_t num, size_t size)
{
struct socket *sock = sk->sk_socket;
const struct proto_ops *ops = READ_ONCE(sock->ops);
if (!ops->sendmsg_locked)
return sock_no_sendmsg_locked(sk, msg, size);
iov_iter_kvec(&msg->msg_iter, ITER_SOURCE, vec, num, size);
return ops->sendmsg_locked(sk, msg, msg_data_left(msg));
}
EXPORT_SYMBOL(kernel_sendmsg_locked);
static bool skb_is_err_queue(const struct sk_buff *skb)
{
/* pkt_type of skbs enqueued on the error queue are set to
* PACKET_OUTGOING in skb_set_err_queue(). This is only safe to do
* in recvmsg, since skbs received on a local socket will never
* have a pkt_type of PACKET_OUTGOING.
*/
return skb->pkt_type == PACKET_OUTGOING;
}
/* On transmit, software and hardware timestamps are returned independently.
* As the two skb clones share the hardware timestamp, which may be updated
* before the software timestamp is received, a hardware TX timestamp may be
* returned only if there is no software TX timestamp. Ignore false software
* timestamps, which may be made in the __sock_recv_timestamp() call when the
* option SO_TIMESTAMP_OLD(NS) is enabled on the socket, even when the skb has a
* hardware timestamp.
*/
static bool skb_is_swtx_tstamp(const struct sk_buff *skb, int false_tstamp)
{
return skb->tstamp && !false_tstamp && skb_is_err_queue(skb);
}
static ktime_t get_timestamp(struct sock *sk, struct sk_buff *skb, int *if_index)
{
bool cycles = READ_ONCE(sk->sk_tsflags) & SOF_TIMESTAMPING_BIND_PHC;
struct skb_shared_hwtstamps *shhwtstamps = skb_hwtstamps(skb);
struct net_device *orig_dev;
ktime_t hwtstamp;
rcu_read_lock();
orig_dev = dev_get_by_napi_id(skb_napi_id(skb));
if (orig_dev) {
*if_index = orig_dev->ifindex;
hwtstamp = netdev_get_tstamp(orig_dev, shhwtstamps, cycles);
} else {
hwtstamp = shhwtstamps->hwtstamp;
}
rcu_read_unlock();
return hwtstamp;
}
static void put_ts_pktinfo(struct msghdr *msg, struct sk_buff *skb,
int if_index)
{
struct scm_ts_pktinfo ts_pktinfo;
struct net_device *orig_dev;
if (!skb_mac_header_was_set(skb))
return;
memset(&ts_pktinfo, 0, sizeof(ts_pktinfo));
if (!if_index) {
rcu_read_lock();
orig_dev = dev_get_by_napi_id(skb_napi_id(skb));
if (orig_dev)
if_index = orig_dev->ifindex;
rcu_read_unlock();
}
ts_pktinfo.if_index = if_index;
ts_pktinfo.pkt_length = skb->len - skb_mac_offset(skb);
put_cmsg(msg, SOL_SOCKET, SCM_TIMESTAMPING_PKTINFO,
sizeof(ts_pktinfo), &ts_pktinfo);
}
/*
* called from sock_recv_timestamp() if sock_flag(sk, SOCK_RCVTSTAMP)
*/
void __sock_recv_timestamp(struct msghdr *msg, struct sock *sk,
struct sk_buff *skb)
{
int need_software_tstamp = sock_flag(sk, SOCK_RCVTSTAMP);
int new_tstamp = sock_flag(sk, SOCK_TSTAMP_NEW);
struct scm_timestamping_internal tss;
int empty = 1, false_tstamp = 0;
struct skb_shared_hwtstamps *shhwtstamps =
skb_hwtstamps(skb);
int if_index;
ktime_t hwtstamp;
u32 tsflags;
/* Race occurred between timestamp enabling and packet
receiving. Fill in the current time for now. */
if (need_software_tstamp && skb->tstamp == 0) {
__net_timestamp(skb);
false_tstamp = 1;
}
if (need_software_tstamp) {
if (!sock_flag(sk, SOCK_RCVTSTAMPNS)) {
if (new_tstamp) {
struct __kernel_sock_timeval tv;
skb_get_new_timestamp(skb, &tv);
put_cmsg(msg, SOL_SOCKET, SO_TIMESTAMP_NEW,
sizeof(tv), &tv);
} else {
struct __kernel_old_timeval tv;
skb_get_timestamp(skb, &tv);
put_cmsg(msg, SOL_SOCKET, SO_TIMESTAMP_OLD,
sizeof(tv), &tv);
}
} else {
if (new_tstamp) {
struct __kernel_timespec ts;
skb_get_new_timestampns(skb, &ts);
put_cmsg(msg, SOL_SOCKET, SO_TIMESTAMPNS_NEW,
sizeof(ts), &ts);
} else {
struct __kernel_old_timespec ts;
skb_get_timestampns(skb, &ts);
put_cmsg(msg, SOL_SOCKET, SO_TIMESTAMPNS_OLD,
sizeof(ts), &ts);
}
}
}
memset(&tss, 0, sizeof(tss));
tsflags = READ_ONCE(sk->sk_tsflags);
if ((tsflags & SOF_TIMESTAMPING_SOFTWARE) &&
ktime_to_timespec64_cond(skb->tstamp, tss.ts + 0))
empty = 0;
if (shhwtstamps &&
(tsflags & SOF_TIMESTAMPING_RAW_HARDWARE) &&
!skb_is_swtx_tstamp(skb, false_tstamp)) {
if_index = 0;
if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP_NETDEV)
hwtstamp = get_timestamp(sk, skb, &if_index);
else
hwtstamp = shhwtstamps->hwtstamp;
if (tsflags & SOF_TIMESTAMPING_BIND_PHC)
hwtstamp = ptp_convert_timestamp(&hwtstamp,
READ_ONCE(sk->sk_bind_phc));
if (ktime_to_timespec64_cond(hwtstamp, tss.ts + 2)) {
empty = 0;
if ((tsflags & SOF_TIMESTAMPING_OPT_PKTINFO) &&
!skb_is_err_queue(skb))
put_ts_pktinfo(msg, skb, if_index);
}
}
if (!empty) {
if (sock_flag(sk, SOCK_TSTAMP_NEW))
put_cmsg_scm_timestamping64(msg, &tss);
else
put_cmsg_scm_timestamping(msg, &tss);
if (skb_is_err_queue(skb) && skb->len &&
SKB_EXT_ERR(skb)->opt_stats)
put_cmsg(msg, SOL_SOCKET, SCM_TIMESTAMPING_OPT_STATS,
skb->len, skb->data);
}
}
EXPORT_SYMBOL_GPL(__sock_recv_timestamp);
#ifdef CONFIG_WIRELESS
void __sock_recv_wifi_status(struct msghdr *msg, struct sock *sk,
struct sk_buff *skb)
{
int ack;
if (!sock_flag(sk, SOCK_WIFI_STATUS))
return;
if (!skb->wifi_acked_valid)
return;
ack = skb->wifi_acked;
put_cmsg(msg, SOL_SOCKET, SCM_WIFI_STATUS, sizeof(ack), &ack);
}
EXPORT_SYMBOL_GPL(__sock_recv_wifi_status);
#endif
static inline void sock_recv_drops(struct msghdr *msg, struct sock *sk,
struct sk_buff *skb)
{
if (sock_flag(sk, SOCK_RXQ_OVFL) && skb && SOCK_SKB_CB(skb)->dropcount)
put_cmsg(msg, SOL_SOCKET, SO_RXQ_OVFL,
sizeof(__u32), &SOCK_SKB_CB(skb)->dropcount);
}
static void sock_recv_mark(struct msghdr *msg, struct sock *sk,
struct sk_buff *skb)
{
if (sock_flag(sk, SOCK_RCVMARK) && skb) {
/* We must use a bounce buffer for CONFIG_HARDENED_USERCOPY=y */
__u32 mark = skb->mark;
put_cmsg(msg, SOL_SOCKET, SO_MARK, sizeof(__u32), &mark);
}
}
void __sock_recv_cmsgs(struct msghdr *msg, struct sock *sk,
struct sk_buff *skb)
{
sock_recv_timestamp(msg, sk, skb);
sock_recv_drops(msg, sk, skb);
sock_recv_mark(msg, sk, skb);
}
EXPORT_SYMBOL_GPL(__sock_recv_cmsgs);
INDIRECT_CALLABLE_DECLARE(int inet_recvmsg(struct socket *, struct msghdr *,
size_t, int));
INDIRECT_CALLABLE_DECLARE(int inet6_recvmsg(struct socket *, struct msghdr *,
size_t, int));
static noinline void call_trace_sock_recv_length(struct sock *sk, int ret, int flags)
{
trace_sock_recv_length(sk, ret, flags);
}
static inline int sock_recvmsg_nosec(struct socket *sock, struct msghdr *msg,
int flags)
{
int ret = INDIRECT_CALL_INET(READ_ONCE(sock->ops)->recvmsg,
inet6_recvmsg,
inet_recvmsg, sock, msg,
msg_data_left(msg), flags);
if (trace_sock_recv_length_enabled())
call_trace_sock_recv_length(sock->sk, ret, flags);
return ret;
}
/**
* sock_recvmsg - receive a message from @sock
* @sock: socket
* @msg: message to receive
* @flags: message flags
*
* Receives @msg from @sock, passing through LSM. Returns the total number
* of bytes received, or an error.
*/
int sock_recvmsg(struct socket *sock, struct msghdr *msg, int flags)
{
int err = security_socket_recvmsg(sock, msg, msg_data_left(msg), flags);
return err ?: sock_recvmsg_nosec(sock, msg, flags);
}
EXPORT_SYMBOL(sock_recvmsg);
/**
* kernel_recvmsg - Receive a message from a socket (kernel space)
* @sock: The socket to receive the message from
* @msg: Received message
* @vec: Input s/g array for message data
* @num: Size of input s/g array
* @size: Number of bytes to read
* @flags: Message flags (MSG_DONTWAIT, etc...)
*
* On return the msg structure contains the scatter/gather array passed in the
* vec argument. The array is modified so that it consists of the unfilled
* portion of the original array.
*
* The returned value is the total number of bytes received, or an error.
*/
int kernel_recvmsg(struct socket *sock, struct msghdr *msg,
struct kvec *vec, size_t num, size_t size, int flags)
{
msg->msg_control_is_user = false;
iov_iter_kvec(&msg->msg_iter, ITER_DEST, vec, num, size);
return sock_recvmsg(sock, msg, flags);
}
EXPORT_SYMBOL(kernel_recvmsg);
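/*
 * Illustrative sketch (hypothetical helper) mirroring the send example
 * above: receive into a kernel buffer via a kvec.
 */
static int __maybe_unused example_kernel_recv(struct socket *sock,
					      void *buf, size_t len)
{
	struct kvec vec = { .iov_base = buf, .iov_len = len };
	struct msghdr msg = {};

	/* Blocks unless MSG_DONTWAIT is passed in the flags argument. */
	return kernel_recvmsg(sock, &msg, &vec, 1, len, 0);
}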
static ssize_t sock_splice_read(struct file *file, loff_t *ppos,
struct pipe_inode_info *pipe, size_t len,
unsigned int flags)
{
struct socket *sock = file->private_data;
const struct proto_ops *ops;
ops = READ_ONCE(sock->ops);
if (unlikely(!ops->splice_read))
return copy_splice_read(file, ppos, pipe, len, flags);
return ops->splice_read(sock, ppos, pipe, len, flags);
}
static void sock_splice_eof(struct file *file)
{
struct socket *sock = file->private_data;
const struct proto_ops *ops;
ops = READ_ONCE(sock->ops);
if (ops->splice_eof)
ops->splice_eof(sock);
}
static ssize_t sock_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
struct file *file = iocb->ki_filp;
struct socket *sock = file->private_data;
struct msghdr msg = {.msg_iter = *to,
.msg_iocb = iocb};
ssize_t res;
if (file->f_flags & O_NONBLOCK || (iocb->ki_flags & IOCB_NOWAIT))
msg.msg_flags = MSG_DONTWAIT;
if (iocb->ki_pos != 0)
return -ESPIPE;
if (!iov_iter_count(to)) /* Match SYS5 behaviour */
return 0;
res = sock_recvmsg(sock, &msg, msg.msg_flags);
*to = msg.msg_iter;
return res;
}
static ssize_t sock_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
struct file *file = iocb->ki_filp;
struct socket *sock = file->private_data;
struct msghdr msg = {.msg_iter = *from,
.msg_iocb = iocb};
ssize_t res;
if (iocb->ki_pos != 0)
return -ESPIPE;
if (file->f_flags & O_NONBLOCK || (iocb->ki_flags & IOCB_NOWAIT))
msg.msg_flags = MSG_DONTWAIT;
if (sock->type == SOCK_SEQPACKET)
msg.msg_flags |= MSG_EOR;
res = sock_sendmsg(sock, &msg);
*from = msg.msg_iter;
return res;
}
/*
* Atomic setting of ioctl hooks to avoid race
* with module unload.
*/
static DEFINE_MUTEX(br_ioctl_mutex);
static int (*br_ioctl_hook)(struct net *net, struct net_bridge *br,
unsigned int cmd, struct ifreq *ifr,
void __user *uarg);
void brioctl_set(int (*hook)(struct net *net, struct net_bridge *br,
unsigned int cmd, struct ifreq *ifr,
void __user *uarg))
{
mutex_lock(&br_ioctl_mutex);
br_ioctl_hook = hook;
mutex_unlock(&br_ioctl_mutex);
}
EXPORT_SYMBOL(brioctl_set);
int br_ioctl_call(struct net *net, struct net_bridge *br, unsigned int cmd,
struct ifreq *ifr, void __user *uarg)
{
int err = -ENOPKG;
if (!br_ioctl_hook)
request_module("bridge");
mutex_lock(&br_ioctl_mutex);
if (br_ioctl_hook)
err = br_ioctl_hook(net, br, cmd, ifr, uarg);
mutex_unlock(&br_ioctl_mutex);
return err;
}
static DEFINE_MUTEX(vlan_ioctl_mutex);
static int (*vlan_ioctl_hook) (struct net *, void __user *arg);
void vlan_ioctl_set(int (*hook) (struct net *, void __user *))
{
mutex_lock(&vlan_ioctl_mutex);
vlan_ioctl_hook = hook;
mutex_unlock(&vlan_ioctl_mutex);
}
EXPORT_SYMBOL(vlan_ioctl_set);
static long sock_do_ioctl(struct net *net, struct socket *sock,
unsigned int cmd, unsigned long arg)
{
const struct proto_ops *ops = READ_ONCE(sock->ops);
struct ifreq ifr;
bool need_copyout;
int err;
void __user *argp = (void __user *)arg;
void __user *data;
err = ops->ioctl(sock, cmd, arg);
/*
* If this ioctl is unknown try to hand it down
* to the NIC driver.
*/
if (err != -ENOIOCTLCMD)
return err;
if (!is_socket_ioctl_cmd(cmd))
return -ENOTTY;
if (get_user_ifreq(&ifr, &data, argp))
return -EFAULT;
err = dev_ioctl(net, cmd, &ifr, data, &need_copyout);
if (!err && need_copyout)
if (put_user_ifreq(&ifr, argp))
return -EFAULT;
return err;
}
/*
* With an ioctl, arg may well be a user mode pointer, but we don't know
* what to do with it - that's up to the protocol still.
*/
static long sock_ioctl(struct file *file, unsigned cmd, unsigned long arg)
{
const struct proto_ops *ops;
struct socket *sock;
struct sock *sk;
void __user *argp = (void __user *)arg;
int pid, err;
struct net *net;
sock = file->private_data;
ops = READ_ONCE(sock->ops);
sk = sock->sk;
net = sock_net(sk);
if (unlikely(cmd >= SIOCDEVPRIVATE && cmd <= (SIOCDEVPRIVATE + 15))) {
struct ifreq ifr;
void __user *data;
bool need_copyout;
if (get_user_ifreq(&ifr, &data, argp))
return -EFAULT;
err = dev_ioctl(net, cmd, &ifr, data, &need_copyout);
if (!err && need_copyout)
if (put_user_ifreq(&ifr, argp))
return -EFAULT;
} else
#ifdef CONFIG_WEXT_CORE
if (cmd >= SIOCIWFIRST && cmd <= SIOCIWLAST) {
err = wext_handle_ioctl(net, cmd, argp);
} else
#endif
switch (cmd) {
case FIOSETOWN:
case SIOCSPGRP:
err = -EFAULT;
if (get_user(pid, (int __user *)argp))
break;
err = f_setown(sock->file, pid, 1);
break;
case FIOGETOWN:
case SIOCGPGRP:
err = put_user(f_getown(sock->file),
(int __user *)argp);
break;
case SIOCGIFBR:
case SIOCSIFBR:
case SIOCBRADDBR:
case SIOCBRDELBR:
err = br_ioctl_call(net, NULL, cmd, NULL, argp);
break;
case SIOCGIFVLAN:
case SIOCSIFVLAN:
err = -ENOPKG;
if (!vlan_ioctl_hook)
request_module("8021q");
mutex_lock(&vlan_ioctl_mutex);
if (vlan_ioctl_hook)
err = vlan_ioctl_hook(net, argp);
mutex_unlock(&vlan_ioctl_mutex);
break;
case SIOCGSKNS:
err = -EPERM;
if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
break;
err = open_related_ns(&net->ns, get_net_ns);
break;
case SIOCGSTAMP_OLD:
case SIOCGSTAMPNS_OLD:
if (!ops->gettstamp) {
err = -ENOIOCTLCMD;
break;
}
err = ops->gettstamp(sock, argp,
cmd == SIOCGSTAMP_OLD,
!IS_ENABLED(CONFIG_64BIT));
break;
case SIOCGSTAMP_NEW:
case SIOCGSTAMPNS_NEW:
if (!ops->gettstamp) {
err = -ENOIOCTLCMD;
break;
}
err = ops->gettstamp(sock, argp,
cmd == SIOCGSTAMP_NEW,
false);
break;
case SIOCGIFCONF:
err = dev_ifconf(net, argp);
break;
default:
err = sock_do_ioctl(net, sock, cmd, arg);
break;
}
return err;
}
/**
* sock_create_lite - creates a socket
* @family: protocol family (AF_INET, ...)
* @type: communication type (SOCK_STREAM, ...)
* @protocol: protocol (0, ...)
* @res: new socket
*
* Creates a new socket and assigns it to @res, passing through LSM.
* The new socket initialization is not complete, see kernel_accept().
* Returns 0 or an error. On failure @res is set to %NULL.
* This function internally uses GFP_KERNEL.
*/
int sock_create_lite(int family, int type, int protocol, struct socket **res)
{
int err;
struct socket *sock = NULL;
err = security_socket_create(family, type, protocol, 1);
if (err)
goto out;
sock = sock_alloc();
if (!sock) {
err = -ENOMEM;
goto out;
}
sock->type = type;
err = security_socket_post_create(sock, family, type, protocol, 1);
if (err)
goto out_release;
out:
*res = sock;
return err;
out_release:
sock_release(sock);
sock = NULL;
goto out;
}
EXPORT_SYMBOL(sock_create_lite);
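/*
 * Illustrative sketch (hypothetical helper; backlog chosen arbitrarily,
 * error unwinding trimmed): sock_create_lite() is the building block
 * behind kernel_accept(), which pairs it with the protocol's accept
 * callback to finish initialization of the new socket.
 */
static int __maybe_unused example_kernel_listener(struct socket *lsock,
						  struct socket **newsock)
{
	int err = kernel_listen(lsock, 8);

	if (err)
		return err;

	/* kernel_accept() allocates *newsock via sock_create_lite() */
	return kernel_accept(lsock, newsock, 0);
}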
/* No kernel lock held - perfect */
static __poll_t sock_poll(struct file *file, poll_table *wait)
{
struct socket *sock = file->private_data;
const struct proto_ops *ops = READ_ONCE(sock->ops);
__poll_t events = poll_requested_events(wait), flag = 0;
if (!ops->poll)
return 0;
if (sk_can_busy_loop(sock->sk)) {
/* poll once if requested by the syscall */
if (events & POLL_BUSY_LOOP)
sk_busy_loop(sock->sk, 1);
		/* if this socket supports busy-poll, tell the system call */
flag = POLL_BUSY_LOOP;
}
return ops->poll(file, sock, wait) | flag;
}
static int sock_mmap(struct file *file, struct vm_area_struct *vma)
{
struct socket *sock = file->private_data;
return READ_ONCE(sock->ops)->mmap(file, sock, vma);
}
static int sock_close(struct inode *inode, struct file *filp)
{
__sock_release(SOCKET_I(inode), inode);
return 0;
}
/*
* Update the socket async list
*
* Fasync_list locking strategy.
*
* 1. fasync_list is modified only under process context socket lock
* i.e. under semaphore.
* 2. fasync_list is used under read_lock(&sk->sk_callback_lock)
* or under socket lock
*/
static int sock_fasync(int fd, struct file *filp, int on)
{
struct socket *sock = filp->private_data;
struct sock *sk = sock->sk;
struct socket_wq *wq = &sock->wq;
if (sk == NULL)
return -EINVAL;
lock_sock(sk);
fasync_helper(fd, filp, on, &wq->fasync_list);
if (!wq->fasync_list)
sock_reset_flag(sk, SOCK_FASYNC);
else
sock_set_flag(sk, SOCK_FASYNC);
release_sock(sk);
return 0;
}
/* This function may be called only under rcu_lock */
int sock_wake_async(struct socket_wq *wq, int how, int band)
{
if (!wq || !wq->fasync_list)
return -1;
switch (how) {
case SOCK_WAKE_WAITD:
if (test_bit(SOCKWQ_ASYNC_WAITDATA, &wq->flags))
break;
goto call_kill;
case SOCK_WAKE_SPACE:
if (!test_and_clear_bit(SOCKWQ_ASYNC_NOSPACE, &wq->flags))
break;
fallthrough;
case SOCK_WAKE_IO:
call_kill:
kill_fasync(&wq->fasync_list, SIGIO, band);
break;
case SOCK_WAKE_URG:
kill_fasync(&wq->fasync_list, SIGURG, band);
}
return 0;
}
EXPORT_SYMBOL(sock_wake_async);
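/*
 * Illustrative sketch (hypothetical helper): protocols do not normally
 * call sock_wake_async() directly; they go through sk_wake_async() from
 * <net/sock.h>, which checks SOCK_FASYNC and takes rcu_read_lock() for
 * them. A data-ready callback signalling readers looks roughly like:
 */
static void __maybe_unused example_signal_readable(struct sock *sk)
{
	sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_IN);
}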
/**
* __sock_create - creates a socket
* @net: net namespace
* @family: protocol family (AF_INET, ...)
* @type: communication type (SOCK_STREAM, ...)
* @protocol: protocol (0, ...)
* @res: new socket
* @kern: boolean for kernel space sockets
*
* Creates a new socket and assigns it to @res, passing through LSM.
* Returns 0 or an error. On failure @res is set to %NULL. @kern must
* be set to true if the socket resides in kernel space.
* This function internally uses GFP_KERNEL.
*/
int __sock_create(struct net *net, int family, int type, int protocol,
struct socket **res, int kern)
{
int err;
struct socket *sock;
const struct net_proto_family *pf;
/*
 *      Check the family and type are in range
*/
if (family < 0 || family >= NPROTO)
return -EAFNOSUPPORT;
if (type < 0 || type >= SOCK_MAX)
return -EINVAL;
/* Compatibility.
This uglymoron is moved from INET layer to here to avoid
deadlock in module load.
*/
if (family == PF_INET && type == SOCK_PACKET) {
pr_info_once("%s uses obsolete (PF_INET,SOCK_PACKET)\n",
current->comm);
family = PF_PACKET;
}
err = security_socket_create(family, type, protocol, kern);
if (err)
return err;
/*
* Allocate the socket and allow the family to set things up. if
* the protocol is 0, the family is instructed to select an appropriate
* default.
*/
sock = sock_alloc();
if (!sock) {
net_warn_ratelimited("socket: no more sockets\n");
		return -ENFILE;	/* Not exactly a match, but it's the
				   closest POSIX thing */
}
sock->type = type;
#ifdef CONFIG_MODULES
/* Attempt to load a protocol module if the find failed.
*
* 12/09/1996 Marcin: But! this makes REALLY only sense, if the user
* requested real, full-featured networking support upon configuration.
* Otherwise module support will break!
*/
if (rcu_access_pointer(net_families[family]) == NULL)
request_module("net-pf-%d", family);
#endif
rcu_read_lock();
pf = rcu_dereference(net_families[family]);
err = -EAFNOSUPPORT;
if (!pf)
goto out_release;
/*
* We will call the ->create function, that possibly is in a loadable
* module, so we have to bump that loadable module refcnt first.
*/
if (!try_module_get(pf->owner))
goto out_release;
/* Now protected by module ref count */
rcu_read_unlock();
err = pf->create(net, sock, protocol, kern);
if (err < 0)
goto out_module_put;
/*
* Now to bump the refcnt of the [loadable] module that owns this
* socket at sock_release time we decrement its refcnt.
*/
if (!try_module_get(sock->ops->owner))
goto out_module_busy;
/*
* Now that we're done with the ->create function, the [loadable]
* module can have its refcnt decremented
*/
module_put(pf->owner);
err = security_socket_post_create(sock, family, type, protocol, kern);
if (err)
goto out_sock_release;
*res = sock;
return 0;
out_module_busy:
err = -EAFNOSUPPORT;
out_module_put:
sock->ops = NULL;
module_put(pf->owner);
out_sock_release:
sock_release(sock);
return err;
out_release:
rcu_read_unlock();
goto out_sock_release;
}
EXPORT_SYMBOL(__sock_create);
/**
* sock_create - creates a socket
* @family: protocol family (AF_INET, ...)
* @type: communication type (SOCK_STREAM, ...)
* @protocol: protocol (0, ...)
* @res: new socket
*
* A wrapper around __sock_create().
* Returns 0 or an error. This function internally uses GFP_KERNEL.
*/
int sock_create(int family, int type, int protocol, struct socket **res)
{
return __sock_create(current->nsproxy->net_ns, family, type, protocol, res, 0);
}
EXPORT_SYMBOL(sock_create);
/**
* sock_create_kern - creates a socket (kernel space)
* @net: net namespace
* @family: protocol family (AF_INET, ...)
* @type: communication type (SOCK_STREAM, ...)
* @protocol: protocol (0, ...)
* @res: new socket
*
* A wrapper around __sock_create().
* Returns 0 or an error. This function internally uses GFP_KERNEL.
*/
int sock_create_kern(struct net *net, int family, int type, int protocol, struct socket **res)
{
return __sock_create(net, family, type, protocol, res, 1);
}
EXPORT_SYMBOL(sock_create_kern);
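/*
 * Illustrative sketch (hypothetical helper; the caller is assumed to
 * have prepared @addr): a kernel user creates its socket directly in a
 * chosen namespace and owns the matching sock_release().
 */
static int __maybe_unused example_kernel_tcp_connect(struct net *net,
						     struct sockaddr *addr,
						     int addrlen,
						     struct socket **res)
{
	struct socket *sock;
	int err;

	err = sock_create_kern(net, PF_INET, SOCK_STREAM, IPPROTO_TCP, &sock);
	if (err)
		return err;

	err = kernel_connect(sock, addr, addrlen, 0);
	if (err) {
		sock_release(sock);
		return err;
	}

	*res = sock;
	return 0;
}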
static struct socket *__sys_socket_create(int family, int type, int protocol)
{
struct socket *sock;
int retval;
/* Check the SOCK_* constants for consistency. */
BUILD_BUG_ON(SOCK_CLOEXEC != O_CLOEXEC);
BUILD_BUG_ON((SOCK_MAX | SOCK_TYPE_MASK) != SOCK_TYPE_MASK);
BUILD_BUG_ON(SOCK_CLOEXEC & SOCK_TYPE_MASK);
BUILD_BUG_ON(SOCK_NONBLOCK & SOCK_TYPE_MASK);
if ((type & ~SOCK_TYPE_MASK) & ~(SOCK_CLOEXEC | SOCK_NONBLOCK))
return ERR_PTR(-EINVAL);
type &= SOCK_TYPE_MASK;
retval = sock_create(family, type, protocol, &sock);
if (retval < 0)
return ERR_PTR(retval);
return sock;
}
struct file *__sys_socket_file(int family, int type, int protocol)
{
struct socket *sock;
int flags;
sock = __sys_socket_create(family, type, protocol);
if (IS_ERR(sock))
return ERR_CAST(sock);
flags = type & ~SOCK_TYPE_MASK;
if (SOCK_NONBLOCK != O_NONBLOCK && (flags & SOCK_NONBLOCK))
flags = (flags & ~SOCK_NONBLOCK) | O_NONBLOCK;
return sock_alloc_file(sock, flags, NULL);
}
/* A hook for bpf progs to attach to and update socket protocol.
*
* A static noinline declaration here could cause the compiler to
* optimize away the function. A global noinline declaration will
* keep the definition, but may optimize away the callsite.
* Therefore, __weak is needed to ensure that the call is still
* emitted, by telling the compiler that we don't know what the
* function might eventually be.
*
* __diag_* below are needed to dismiss the missing prototype warning.
*/
__diag_push();
__diag_ignore_all("-Wmissing-prototypes",
"A fmod_ret entry point for BPF programs");
__weak noinline int update_socket_protocol(int family, int type, int protocol)
{
return protocol;
}
__diag_pop();
int __sys_socket(int family, int type, int protocol)
{
struct socket *sock;
int flags;
sock = __sys_socket_create(family, type,
update_socket_protocol(family, type, protocol));
if (IS_ERR(sock))
return PTR_ERR(sock);
flags = type & ~SOCK_TYPE_MASK;
if (SOCK_NONBLOCK != O_NONBLOCK && (flags & SOCK_NONBLOCK))
flags = (flags & ~SOCK_NONBLOCK) | O_NONBLOCK;
return sock_map_fd(sock, flags & (O_CLOEXEC | O_NONBLOCK));
}
SYSCALL_DEFINE3(socket, int, family, int, type, int, protocol)
{
return __sys_socket(family, type, protocol);
}
/*
* Create a pair of connected sockets.
*/
int __sys_socketpair(int family, int type, int protocol, int __user *usockvec)
{
struct socket *sock1, *sock2;
int fd1, fd2, err;
struct file *newfile1, *newfile2;
int flags;
flags = type & ~SOCK_TYPE_MASK;
if (flags & ~(SOCK_CLOEXEC | SOCK_NONBLOCK))
return -EINVAL;
type &= SOCK_TYPE_MASK;
if (SOCK_NONBLOCK != O_NONBLOCK && (flags & SOCK_NONBLOCK))
flags = (flags & ~SOCK_NONBLOCK) | O_NONBLOCK;
/*
* reserve descriptors and make sure we won't fail
* to return them to userland.
*/
fd1 = get_unused_fd_flags(flags);
if (unlikely(fd1 < 0))
return fd1;
fd2 = get_unused_fd_flags(flags);
if (unlikely(fd2 < 0)) {
put_unused_fd(fd1);
return fd2;
}
err = put_user(fd1, &usockvec[0]);
if (err)
goto out;
err = put_user(fd2, &usockvec[1]);
if (err)
goto out;
/*
* Obtain the first socket and check if the underlying protocol
* supports the socketpair call.
*/
err = sock_create(family, type, protocol, &sock1);
if (unlikely(err < 0))
goto out;
err = sock_create(family, type, protocol, &sock2);
if (unlikely(err < 0)) {
sock_release(sock1);
goto out;
}
err = security_socket_socketpair(sock1, sock2);
if (unlikely(err)) {
sock_release(sock2);
sock_release(sock1);
goto out;
}
err = READ_ONCE(sock1->ops)->socketpair(sock1, sock2);
if (unlikely(err < 0)) {
sock_release(sock2);
sock_release(sock1);
goto out;
}
newfile1 = sock_alloc_file(sock1, flags, NULL);
if (IS_ERR(newfile1)) {
err = PTR_ERR(newfile1);
sock_release(sock2);
goto out;
}
newfile2 = sock_alloc_file(sock2, flags, NULL);
if (IS_ERR(newfile2)) {
err = PTR_ERR(newfile2);
fput(newfile1);
goto out;
}
audit_fd_pair(fd1, fd2);
fd_install(fd1, newfile1);
fd_install(fd2, newfile2);
return 0;
out:
put_unused_fd(fd2);
put_unused_fd(fd1);
return err;
}
SYSCALL_DEFINE4(socketpair, int, family, int, type, int, protocol,
int __user *, usockvec)
{
return __sys_socketpair(family, type, protocol, usockvec);
}
/*
* Bind a name to a socket. Nothing much to do here since it's
* the protocol's responsibility to handle the local address.
*
* We move the socket address to kernel space before we call
* the protocol layer (having also checked the address is ok).
*/
int __sys_bind(int fd, struct sockaddr __user *umyaddr, int addrlen)
{
struct socket *sock;
struct sockaddr_storage address;
int err, fput_needed;
sock = sockfd_lookup_light(fd, &err, &fput_needed);
if (sock) {
err = move_addr_to_kernel(umyaddr, addrlen, &address);
if (!err) {
err = security_socket_bind(sock,
(struct sockaddr *)&address,
addrlen);
if (!err)
err = READ_ONCE(sock->ops)->bind(sock,
(struct sockaddr *)
&address, addrlen);
}
fput_light(sock->file, fput_needed);
}
return err;
}
SYSCALL_DEFINE3(bind, int, fd, struct sockaddr __user *, umyaddr, int, addrlen)
{
return __sys_bind(fd, umyaddr, addrlen);
}
/*
* Perform a listen. Basically, we allow the protocol to do anything
* necessary for a listen, and if that works, we mark the socket as
* ready for listening.
*/
int __sys_listen(int fd, int backlog)
{
struct socket *sock;
int err, fput_needed;
int somaxconn;
sock = sockfd_lookup_light(fd, &err, &fput_needed);
if (sock) {
somaxconn = READ_ONCE(sock_net(sock->sk)->core.sysctl_somaxconn);
if ((unsigned int)backlog > somaxconn)
backlog = somaxconn;
err = security_socket_listen(sock, backlog);
if (!err)
err = READ_ONCE(sock->ops)->listen(sock, backlog);
fput_light(sock->file, fput_needed);
}
return err;
}
SYSCALL_DEFINE2(listen, int, fd, int, backlog)
{
return __sys_listen(fd, backlog);
}
struct file *do_accept(struct file *file, unsigned file_flags,
struct sockaddr __user *upeer_sockaddr,
int __user *upeer_addrlen, int flags)
{
struct socket *sock, *newsock;
struct file *newfile;
int err, len;
struct sockaddr_storage address;
const struct proto_ops *ops;
sock = sock_from_file(file);
if (!sock)
return ERR_PTR(-ENOTSOCK);
newsock = sock_alloc();
if (!newsock)
return ERR_PTR(-ENFILE);
ops = READ_ONCE(sock->ops);
newsock->type = sock->type;
newsock->ops = ops;
/*
* We don't need try_module_get here, as the listening socket (sock)
* has the protocol module (sock->ops->owner) held.
*/
__module_get(ops->owner);
newfile = sock_alloc_file(newsock, flags, sock->sk->sk_prot_creator->name);
if (IS_ERR(newfile))
return newfile;
err = security_socket_accept(sock, newsock);
if (err)
goto out_fd;
err = ops->accept(sock, newsock, sock->file->f_flags | file_flags,
false);
if (err < 0)
goto out_fd;
if (upeer_sockaddr) {
len = ops->getname(newsock, (struct sockaddr *)&address, 2);
if (len < 0) {
err = -ECONNABORTED;
goto out_fd;
}
err = move_addr_to_user(&address,
len, upeer_sockaddr, upeer_addrlen);
if (err < 0)
goto out_fd;
}
	/* File flags are not inherited via accept(), unlike on other OSes. */
return newfile;
out_fd:
fput(newfile);
return ERR_PTR(err);
}
static int __sys_accept4_file(struct file *file, struct sockaddr __user *upeer_sockaddr,
int __user *upeer_addrlen, int flags)
{
struct file *newfile;
int newfd;
if (flags & ~(SOCK_CLOEXEC | SOCK_NONBLOCK))
return -EINVAL;
if (SOCK_NONBLOCK != O_NONBLOCK && (flags & SOCK_NONBLOCK))
flags = (flags & ~SOCK_NONBLOCK) | O_NONBLOCK;
newfd = get_unused_fd_flags(flags);
if (unlikely(newfd < 0))
return newfd;
newfile = do_accept(file, 0, upeer_sockaddr, upeer_addrlen,
flags);
if (IS_ERR(newfile)) {
put_unused_fd(newfd);
return PTR_ERR(newfile);
}
fd_install(newfd, newfile);
return newfd;
}
/*
* For accept, we attempt to create a new socket, set up the link
* with the client, wake up the client, then return the new
* connected fd. We collect the address of the connector in kernel
* space and move it to user at the very end. This is unclean because
* we open the socket then return an error.
*
 * 1003.1g adds the ability for recvmsg() to query connection pending
 * status. We need to add that support in a way that's clean when we
 * restructure accept also.
*/
int __sys_accept4(int fd, struct sockaddr __user *upeer_sockaddr,
int __user *upeer_addrlen, int flags)
{
int ret = -EBADF;
struct fd f;
f = fdget(fd);
if (f.file) {
ret = __sys_accept4_file(f.file, upeer_sockaddr,
upeer_addrlen, flags);
fdput(f);
}
return ret;
}
SYSCALL_DEFINE4(accept4, int, fd, struct sockaddr __user *, upeer_sockaddr,
int __user *, upeer_addrlen, int, flags)
{
return __sys_accept4(fd, upeer_sockaddr, upeer_addrlen, flags);
}
SYSCALL_DEFINE3(accept, int, fd, struct sockaddr __user *, upeer_sockaddr,
int __user *, upeer_addrlen)
{
return __sys_accept4(fd, upeer_sockaddr, upeer_addrlen, 0);
}
/*
* Attempt to connect to a socket with the server address. The address
* is in user space so we verify it is OK and move it to kernel space.
*
* For 1003.1g we need to add clean support for a bind to AF_UNSPEC to
* break bindings
*
* NOTE: 1003.1g draft 6.3 is broken with respect to AX.25/NetROM and
* other SEQPACKET protocols that take time to connect() as it doesn't
* include the -EINPROGRESS status for such sockets.
*/
int __sys_connect_file(struct file *file, struct sockaddr_storage *address,
int addrlen, int file_flags)
{
struct socket *sock;
int err;
sock = sock_from_file(file);
if (!sock) {
err = -ENOTSOCK;
goto out;
}
err =
security_socket_connect(sock, (struct sockaddr *)address, addrlen);
if (err)
goto out;
err = READ_ONCE(sock->ops)->connect(sock, (struct sockaddr *)address,
addrlen, sock->file->f_flags | file_flags);
out:
return err;
}
int __sys_connect(int fd, struct sockaddr __user *uservaddr, int addrlen)
{
int ret = -EBADF;
struct fd f;
f = fdget(fd);
if (f.file) {
struct sockaddr_storage address;
ret = move_addr_to_kernel(uservaddr, addrlen, &address);
if (!ret)
ret = __sys_connect_file(f.file, &address, addrlen, 0);
fdput(f);
}
return ret;
}
SYSCALL_DEFINE3(connect, int, fd, struct sockaddr __user *, uservaddr,
int, addrlen)
{
return __sys_connect(fd, uservaddr, addrlen);
}
/*
* Get the local address ('name') of a socket object. Move the obtained
* name to user space.
*/
int __sys_getsockname(int fd, struct sockaddr __user *usockaddr,
int __user *usockaddr_len)
{
struct socket *sock;
struct sockaddr_storage address;
int err, fput_needed;
sock = sockfd_lookup_light(fd, &err, &fput_needed);
if (!sock)
goto out;
err = security_socket_getsockname(sock);
if (err)
goto out_put;
err = READ_ONCE(sock->ops)->getname(sock, (struct sockaddr *)&address, 0);
if (err < 0)
goto out_put;
/* "err" is actually length in this case */
err = move_addr_to_user(&address, err, usockaddr, usockaddr_len);
out_put:
fput_light(sock->file, fput_needed);
out:
return err;
}
SYSCALL_DEFINE3(getsockname, int, fd, struct sockaddr __user *, usockaddr,
int __user *, usockaddr_len)
{
return __sys_getsockname(fd, usockaddr, usockaddr_len);
}
/*
* Get the remote address ('name') of a socket object. Move the obtained
* name to user space.
*/
int __sys_getpeername(int fd, struct sockaddr __user *usockaddr,
int __user *usockaddr_len)
{
struct socket *sock;
struct sockaddr_storage address;
int err, fput_needed;
sock = sockfd_lookup_light(fd, &err, &fput_needed);
if (sock != NULL) {
const struct proto_ops *ops = READ_ONCE(sock->ops);
err = security_socket_getpeername(sock);
if (err) {
fput_light(sock->file, fput_needed);
return err;
}
err = ops->getname(sock, (struct sockaddr *)&address, 1);
if (err >= 0)
/* "err" is actually length in this case */
err = move_addr_to_user(&address, err, usockaddr,
usockaddr_len);
fput_light(sock->file, fput_needed);
}
return err;
}
SYSCALL_DEFINE3(getpeername, int, fd, struct sockaddr __user *, usockaddr,
int __user *, usockaddr_len)
{
return __sys_getpeername(fd, usockaddr, usockaddr_len);
}
/*
* Send a datagram to a given address. We move the address into kernel
* space and check the user space data area is readable before invoking
* the protocol.
*/
int __sys_sendto(int fd, void __user *buff, size_t len, unsigned int flags,
struct sockaddr __user *addr, int addr_len)
{
struct socket *sock;
struct sockaddr_storage address;
int err;
struct msghdr msg;
struct iovec iov;
int fput_needed;
err = import_single_range(ITER_SOURCE, buff, len, &iov, &msg.msg_iter);
if (unlikely(err))
return err;
sock = sockfd_lookup_light(fd, &err, &fput_needed);
if (!sock)
goto out;
msg.msg_name = NULL;
msg.msg_control = NULL;
msg.msg_controllen = 0;
msg.msg_namelen = 0;
msg.msg_ubuf = NULL;
if (addr) {
err = move_addr_to_kernel(addr, addr_len, &address);
if (err < 0)
goto out_put;
msg.msg_name = (struct sockaddr *)&address;
msg.msg_namelen = addr_len;
}
flags &= ~MSG_INTERNAL_SENDMSG_FLAGS;
if (sock->file->f_flags & O_NONBLOCK)
flags |= MSG_DONTWAIT;
msg.msg_flags = flags;
err = sock_sendmsg(sock, &msg);
out_put:
fput_light(sock->file, fput_needed);
out:
return err;
}
SYSCALL_DEFINE6(sendto, int, fd, void __user *, buff, size_t, len,
unsigned int, flags, struct sockaddr __user *, addr,
int, addr_len)
{
return __sys_sendto(fd, buff, len, flags, addr, addr_len);
}
/*
* Send a datagram down a socket.
*/
SYSCALL_DEFINE4(send, int, fd, void __user *, buff, size_t, len,
unsigned int, flags)
{
return __sys_sendto(fd, buff, len, flags, NULL, 0);
}
/*
* Receive a frame from the socket and optionally record the address of the
* sender. We verify the buffers are writable and if needed move the
* sender address from kernel to user space.
*/
int __sys_recvfrom(int fd, void __user *ubuf, size_t size, unsigned int flags,
struct sockaddr __user *addr, int __user *addr_len)
{
struct sockaddr_storage address;
struct msghdr msg = {
/* Save some cycles and don't copy the address if not needed */
.msg_name = addr ? (struct sockaddr *)&address : NULL,
};
struct socket *sock;
struct iovec iov;
int err, err2;
int fput_needed;
err = import_single_range(ITER_DEST, ubuf, size, &iov, &msg.msg_iter);
if (unlikely(err))
return err;
sock = sockfd_lookup_light(fd, &err, &fput_needed);
if (!sock)
goto out;
if (sock->file->f_flags & O_NONBLOCK)
flags |= MSG_DONTWAIT;
err = sock_recvmsg(sock, &msg, flags);
if (err >= 0 && addr != NULL) {
err2 = move_addr_to_user(&address,
msg.msg_namelen, addr, addr_len);
if (err2 < 0)
err = err2;
}
fput_light(sock->file, fput_needed);
out:
return err;
}
SYSCALL_DEFINE6(recvfrom, int, fd, void __user *, ubuf, size_t, size,
unsigned int, flags, struct sockaddr __user *, addr,
int __user *, addr_len)
{
return __sys_recvfrom(fd, ubuf, size, flags, addr, addr_len);
}
/*
* Receive a datagram from a socket.
*/
SYSCALL_DEFINE4(recv, int, fd, void __user *, ubuf, size_t, size,
unsigned int, flags)
{
return __sys_recvfrom(fd, ubuf, size, flags, NULL, NULL);
}
static bool sock_use_custom_sol_socket(const struct socket *sock)
{
return test_bit(SOCK_CUSTOM_SOCKOPT, &sock->flags);
}
/*
* Set a socket option. Because we don't know the option lengths we have
* to pass the user mode parameter for the protocols to sort out.
*/
int __sys_setsockopt(int fd, int level, int optname, char __user *user_optval,
int optlen)
{
sockptr_t optval = USER_SOCKPTR(user_optval);
const struct proto_ops *ops;
char *kernel_optval = NULL;
int err, fput_needed;
struct socket *sock;
if (optlen < 0)
return -EINVAL;
sock = sockfd_lookup_light(fd, &err, &fput_needed);
if (!sock)
return err;
err = security_socket_setsockopt(sock, level, optname);
if (err)
goto out_put;
if (!in_compat_syscall())
err = BPF_CGROUP_RUN_PROG_SETSOCKOPT(sock->sk, &level, &optname,
user_optval, &optlen,
&kernel_optval);
if (err < 0)
goto out_put;
if (err > 0) {
err = 0;
goto out_put;
}
if (kernel_optval)
optval = KERNEL_SOCKPTR(kernel_optval);
ops = READ_ONCE(sock->ops);
if (level == SOL_SOCKET && !sock_use_custom_sol_socket(sock))
err = sock_setsockopt(sock, level, optname, optval, optlen);
else if (unlikely(!ops->setsockopt))
err = -EOPNOTSUPP;
else
err = ops->setsockopt(sock, level, optname, optval,
optlen);
kfree(kernel_optval);
out_put:
fput_light(sock->file, fput_needed);
return err;
}
SYSCALL_DEFINE5(setsockopt, int, fd, int, level, int, optname,
char __user *, optval, int, optlen)
{
return __sys_setsockopt(fd, level, optname, optval, optlen);
}
INDIRECT_CALLABLE_DECLARE(bool tcp_bpf_bypass_getsockopt(int level,
int optname));
/*
* Get a socket option. Because we don't know the option lengths we have
* to pass a user mode parameter for the protocols to sort out.
*/
int __sys_getsockopt(int fd, int level, int optname, char __user *optval,
int __user *optlen)
{
int max_optlen __maybe_unused;
const struct proto_ops *ops;
int err, fput_needed;
struct socket *sock;
sock = sockfd_lookup_light(fd, &err, &fput_needed);
if (!sock)
return err;
err = security_socket_getsockopt(sock, level, optname);
if (err)
goto out_put;
if (!in_compat_syscall())
max_optlen = BPF_CGROUP_GETSOCKOPT_MAX_OPTLEN(optlen);
ops = READ_ONCE(sock->ops);
if (level == SOL_SOCKET)
err = sock_getsockopt(sock, level, optname, optval, optlen);
else if (unlikely(!ops->getsockopt))
err = -EOPNOTSUPP;
else
err = ops->getsockopt(sock, level, optname, optval,
optlen);
if (!in_compat_syscall())
err = BPF_CGROUP_RUN_PROG_GETSOCKOPT(sock->sk, level, optname,
optval, optlen, max_optlen,
err);
out_put:
fput_light(sock->file, fput_needed);
return err;
}
SYSCALL_DEFINE5(getsockopt, int, fd, int, level, int, optname,
char __user *, optval, int __user *, optlen)
{
return __sys_getsockopt(fd, level, optname, optval, optlen);
}
/*
* Shutdown a socket.
*/
int __sys_shutdown_sock(struct socket *sock, int how)
{
int err;
err = security_socket_shutdown(sock, how);
if (!err)
err = READ_ONCE(sock->ops)->shutdown(sock, how);
return err;
}
int __sys_shutdown(int fd, int how)
{
int err, fput_needed;
struct socket *sock;
sock = sockfd_lookup_light(fd, &err, &fput_needed);
if (sock != NULL) {
err = __sys_shutdown_sock(sock, how);
fput_light(sock->file, fput_needed);
}
return err;
}
SYSCALL_DEFINE2(shutdown, int, fd, int, how)
{
return __sys_shutdown(fd, how);
}
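/*
 * Illustrative sketch (hypothetical helper): kernel callers bypass the
 * fd layer entirely and use kernel_sock_shutdown() from <linux/net.h>
 * with the SHUT_* enum directly.
 */
static void __maybe_unused example_kernel_shutdown(struct socket *sock)
{
	kernel_sock_shutdown(sock, SHUT_RDWR);
}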
/* A couple of helpful macros for getting the address of the 32/64 bit
* fields which are the same type (int / unsigned) on our platforms.
*/
#define COMPAT_MSG(msg, member) ((MSG_CMSG_COMPAT & flags) ? &msg##_compat->member : &msg->member)
#define COMPAT_NAMELEN(msg) COMPAT_MSG(msg, msg_namelen)
#define COMPAT_FLAGS(msg) COMPAT_MSG(msg, msg_flags)
struct used_address {
struct sockaddr_storage name;
unsigned int name_len;
};
int __copy_msghdr(struct msghdr *kmsg,
struct user_msghdr *msg,
struct sockaddr __user **save_addr)
{
ssize_t err;
kmsg->msg_control_is_user = true;
kmsg->msg_get_inq = 0;
kmsg->msg_control_user = msg->msg_control;
kmsg->msg_controllen = msg->msg_controllen;
kmsg->msg_flags = msg->msg_flags;
kmsg->msg_namelen = msg->msg_namelen;
if (!msg->msg_name)
kmsg->msg_namelen = 0;
if (kmsg->msg_namelen < 0)
return -EINVAL;
if (kmsg->msg_namelen > sizeof(struct sockaddr_storage))
kmsg->msg_namelen = sizeof(struct sockaddr_storage);
if (save_addr)
*save_addr = msg->msg_name;
if (msg->msg_name && kmsg->msg_namelen) {
if (!save_addr) {
err = move_addr_to_kernel(msg->msg_name,
kmsg->msg_namelen,
kmsg->msg_name);
if (err < 0)
return err;
}
} else {
kmsg->msg_name = NULL;
kmsg->msg_namelen = 0;
}
if (msg->msg_iovlen > UIO_MAXIOV)
return -EMSGSIZE;
kmsg->msg_iocb = NULL;
kmsg->msg_ubuf = NULL;
return 0;
}
static int copy_msghdr_from_user(struct msghdr *kmsg,
struct user_msghdr __user *umsg,
struct sockaddr __user **save_addr,
struct iovec **iov)
{
struct user_msghdr msg;
ssize_t err;
if (copy_from_user(&msg, umsg, sizeof(*umsg)))
return -EFAULT;
err = __copy_msghdr(kmsg, &msg, save_addr);
if (err)
return err;
err = import_iovec(save_addr ? ITER_DEST : ITER_SOURCE,
msg.msg_iov, msg.msg_iovlen,
UIO_FASTIOV, iov, &kmsg->msg_iter);
return err < 0 ? err : 0;
}
static int ____sys_sendmsg(struct socket *sock, struct msghdr *msg_sys,
unsigned int flags, struct used_address *used_address,
unsigned int allowed_msghdr_flags)
{
unsigned char ctl[sizeof(struct cmsghdr) + 20]
__aligned(sizeof(__kernel_size_t));
/* 20 is size of ipv6_pktinfo */
unsigned char *ctl_buf = ctl;
int ctl_len;
ssize_t err;
err = -ENOBUFS;
if (msg_sys->msg_controllen > INT_MAX)
goto out;
flags |= (msg_sys->msg_flags & allowed_msghdr_flags);
ctl_len = msg_sys->msg_controllen;
if ((MSG_CMSG_COMPAT & flags) && ctl_len) {
err =
cmsghdr_from_user_compat_to_kern(msg_sys, sock->sk, ctl,
sizeof(ctl));
if (err)
goto out;
ctl_buf = msg_sys->msg_control;
ctl_len = msg_sys->msg_controllen;
} else if (ctl_len) {
BUILD_BUG_ON(sizeof(struct cmsghdr) !=
CMSG_ALIGN(sizeof(struct cmsghdr)));
if (ctl_len > sizeof(ctl)) {
ctl_buf = sock_kmalloc(sock->sk, ctl_len, GFP_KERNEL);
if (ctl_buf == NULL)
goto out;
}
err = -EFAULT;
if (copy_from_user(ctl_buf, msg_sys->msg_control_user, ctl_len))
goto out_freectl;
msg_sys->msg_control = ctl_buf;
msg_sys->msg_control_is_user = false;
}
flags &= ~MSG_INTERNAL_SENDMSG_FLAGS;
msg_sys->msg_flags = flags;
if (sock->file->f_flags & O_NONBLOCK)
msg_sys->msg_flags |= MSG_DONTWAIT;
/*
* If this is sendmmsg() and current destination address is same as
* previously succeeded address, omit asking LSM's decision.
* used_address->name_len is initialized to UINT_MAX so that the first
* destination address never matches.
*/
if (used_address && msg_sys->msg_name &&
used_address->name_len == msg_sys->msg_namelen &&
!memcmp(&used_address->name, msg_sys->msg_name,
used_address->name_len)) {
err = sock_sendmsg_nosec(sock, msg_sys);
goto out_freectl;
}
err = sock_sendmsg(sock, msg_sys);
/*
* If this is sendmmsg() and sending to current destination address was
* successful, remember it.
*/
if (used_address && err >= 0) {
used_address->name_len = msg_sys->msg_namelen;
if (msg_sys->msg_name)
memcpy(&used_address->name, msg_sys->msg_name,
used_address->name_len);
}
out_freectl:
if (ctl_buf != ctl)
sock_kfree_s(sock->sk, ctl_buf, ctl_len);
out:
return err;
}
int sendmsg_copy_msghdr(struct msghdr *msg,
struct user_msghdr __user *umsg, unsigned flags,
struct iovec **iov)
{
int err;
if (flags & MSG_CMSG_COMPAT) {
struct compat_msghdr __user *msg_compat;
msg_compat = (struct compat_msghdr __user *) umsg;
err = get_compat_msghdr(msg, msg_compat, NULL, iov);
} else {
err = copy_msghdr_from_user(msg, umsg, NULL, iov);
}
if (err < 0)
return err;
return 0;
}
static int ___sys_sendmsg(struct socket *sock, struct user_msghdr __user *msg,
struct msghdr *msg_sys, unsigned int flags,
struct used_address *used_address,
unsigned int allowed_msghdr_flags)
{
struct sockaddr_storage address;
struct iovec iovstack[UIO_FASTIOV], *iov = iovstack;
ssize_t err;
msg_sys->msg_name = &address;
err = sendmsg_copy_msghdr(msg_sys, msg, flags, &iov);
if (err < 0)
return err;
err = ____sys_sendmsg(sock, msg_sys, flags, used_address,
allowed_msghdr_flags);
kfree(iov);
return err;
}
/*
* BSD sendmsg interface
*/
long __sys_sendmsg_sock(struct socket *sock, struct msghdr *msg,
unsigned int flags)
{
return ____sys_sendmsg(sock, msg, flags, NULL, 0);
}
long __sys_sendmsg(int fd, struct user_msghdr __user *msg, unsigned int flags,
bool forbid_cmsg_compat)
{
int fput_needed, err;
struct msghdr msg_sys;
struct socket *sock;
if (forbid_cmsg_compat && (flags & MSG_CMSG_COMPAT))
return -EINVAL;
sock = sockfd_lookup_light(fd, &err, &fput_needed);
if (!sock)
goto out;
err = ___sys_sendmsg(sock, msg, &msg_sys, flags, NULL, 0);
fput_light(sock->file, fput_needed);
out:
return err;
}
SYSCALL_DEFINE3(sendmsg, int, fd, struct user_msghdr __user *, msg, unsigned int, flags)
{
return __sys_sendmsg(fd, msg, flags, true);
}
/*
* Linux sendmmsg interface
*/
int __sys_sendmmsg(int fd, struct mmsghdr __user *mmsg, unsigned int vlen,
unsigned int flags, bool forbid_cmsg_compat)
{
int fput_needed, err, datagrams;
struct socket *sock;
struct mmsghdr __user *entry;
struct compat_mmsghdr __user *compat_entry;
struct msghdr msg_sys;
struct used_address used_address;
unsigned int oflags = flags;
if (forbid_cmsg_compat && (flags & MSG_CMSG_COMPAT))
return -EINVAL;
if (vlen > UIO_MAXIOV)
vlen = UIO_MAXIOV;
datagrams = 0;
sock = sockfd_lookup_light(fd, &err, &fput_needed);
if (!sock)
return err;
used_address.name_len = UINT_MAX;
entry = mmsg;
compat_entry = (struct compat_mmsghdr __user *)mmsg;
err = 0;
flags |= MSG_BATCH;
while (datagrams < vlen) {
if (datagrams == vlen - 1)
flags = oflags;
if (MSG_CMSG_COMPAT & flags) {
err = ___sys_sendmsg(sock, (struct user_msghdr __user *)compat_entry,
&msg_sys, flags, &used_address, MSG_EOR);
if (err < 0)
break;
err = __put_user(err, &compat_entry->msg_len);
++compat_entry;
} else {
err = ___sys_sendmsg(sock,
(struct user_msghdr __user *)entry,
&msg_sys, flags, &used_address, MSG_EOR);
if (err < 0)
break;
err = put_user(err, &entry->msg_len);
++entry;
}
if (err)
break;
++datagrams;
if (msg_data_left(&msg_sys))
break;
cond_resched();
}
fput_light(sock->file, fput_needed);
/* We only return an error if no datagrams were able to be sent */
if (datagrams != 0)
return datagrams;
return err;
}
SYSCALL_DEFINE4(sendmmsg, int, fd, struct mmsghdr __user *, mmsg,
unsigned int, vlen, unsigned int, flags)
{
return __sys_sendmmsg(fd, mmsg, vlen, flags, true);
}
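/* Usage sketch (illustrative, not from this file): user space batches
 * datagrams into one call; on return each mmsghdr::msg_len holds the byte
 * count sent for that entry, as filled in by the put_user() calls above:
 *
 *	struct mmsghdr msgs[2];		// msg_hdr fields set up by the caller
 *	int sent = sendmmsg(fd, msgs, 2, 0);
 *	// sent == number of messages transmitted, or -1 with errno set
 */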
int recvmsg_copy_msghdr(struct msghdr *msg,
struct user_msghdr __user *umsg, unsigned flags,
struct sockaddr __user **uaddr,
struct iovec **iov)
{
ssize_t err;
if (MSG_CMSG_COMPAT & flags) {
struct compat_msghdr __user *msg_compat;
msg_compat = (struct compat_msghdr __user *) umsg;
err = get_compat_msghdr(msg, msg_compat, uaddr, iov);
} else {
err = copy_msghdr_from_user(msg, umsg, uaddr, iov);
}
if (err < 0)
return err;
return 0;
}
static int ____sys_recvmsg(struct socket *sock, struct msghdr *msg_sys,
struct user_msghdr __user *msg,
struct sockaddr __user *uaddr,
unsigned int flags, int nosec)
{
struct compat_msghdr __user *msg_compat =
(struct compat_msghdr __user *) msg;
int __user *uaddr_len = COMPAT_NAMELEN(msg);
struct sockaddr_storage addr;
unsigned long cmsg_ptr;
int len;
ssize_t err;
msg_sys->msg_name = &addr;
cmsg_ptr = (unsigned long)msg_sys->msg_control;
msg_sys->msg_flags = flags & (MSG_CMSG_CLOEXEC|MSG_CMSG_COMPAT);
/* We assume all kernel code knows the size of sockaddr_storage */
msg_sys->msg_namelen = 0;
if (sock->file->f_flags & O_NONBLOCK)
flags |= MSG_DONTWAIT;
if (unlikely(nosec))
err = sock_recvmsg_nosec(sock, msg_sys, flags);
else
err = sock_recvmsg(sock, msg_sys, flags);
if (err < 0)
goto out;
len = err;
if (uaddr != NULL) {
err = move_addr_to_user(&addr,
msg_sys->msg_namelen, uaddr,
uaddr_len);
if (err < 0)
goto out;
}
err = __put_user((msg_sys->msg_flags & ~MSG_CMSG_COMPAT),
COMPAT_FLAGS(msg));
if (err)
goto out;
if (MSG_CMSG_COMPAT & flags)
err = __put_user((unsigned long)msg_sys->msg_control - cmsg_ptr,
&msg_compat->msg_controllen);
else
err = __put_user((unsigned long)msg_sys->msg_control - cmsg_ptr,
&msg->msg_controllen);
if (err)
goto out;
err = len;
out:
return err;
}
static int ___sys_recvmsg(struct socket *sock, struct user_msghdr __user *msg,
struct msghdr *msg_sys, unsigned int flags, int nosec)
{
struct iovec iovstack[UIO_FASTIOV], *iov = iovstack;
/* user mode address pointers */
struct sockaddr __user *uaddr;
ssize_t err;
err = recvmsg_copy_msghdr(msg_sys, msg, flags, &uaddr, &iov);
if (err < 0)
return err;
err = ____sys_recvmsg(sock, msg_sys, msg, uaddr, flags, nosec);
kfree(iov);
return err;
}
/*
* BSD recvmsg interface
*/
long __sys_recvmsg_sock(struct socket *sock, struct msghdr *msg,
struct user_msghdr __user *umsg,
struct sockaddr __user *uaddr, unsigned int flags)
{
return ____sys_recvmsg(sock, msg, umsg, uaddr, flags, 0);
}
long __sys_recvmsg(int fd, struct user_msghdr __user *msg, unsigned int flags,
bool forbid_cmsg_compat)
{
int fput_needed, err;
struct msghdr msg_sys;
struct socket *sock;
if (forbid_cmsg_compat && (flags & MSG_CMSG_COMPAT))
return -EINVAL;
sock = sockfd_lookup_light(fd, &err, &fput_needed);
if (!sock)
goto out;
err = ___sys_recvmsg(sock, msg, &msg_sys, flags, 0);
fput_light(sock->file, fput_needed);
out:
return err;
}
SYSCALL_DEFINE3(recvmsg, int, fd, struct user_msghdr __user *, msg,
unsigned int, flags)
{
return __sys_recvmsg(fd, msg, flags, true);
}
/*
* Linux recvmmsg interface
*/
static int do_recvmmsg(int fd, struct mmsghdr __user *mmsg,
unsigned int vlen, unsigned int flags,
struct timespec64 *timeout)
{
int fput_needed, err, datagrams;
struct socket *sock;
struct mmsghdr __user *entry;
struct compat_mmsghdr __user *compat_entry;
struct msghdr msg_sys;
struct timespec64 end_time;
struct timespec64 timeout64;
if (timeout &&
poll_select_set_timeout(&end_time, timeout->tv_sec,
timeout->tv_nsec))
return -EINVAL;
datagrams = 0;
sock = sockfd_lookup_light(fd, &err, &fput_needed);
if (!sock)
return err;
if (likely(!(flags & MSG_ERRQUEUE))) {
err = sock_error(sock->sk);
if (err) {
datagrams = err;
goto out_put;
}
}
entry = mmsg;
compat_entry = (struct compat_mmsghdr __user *)mmsg;
while (datagrams < vlen) {
/*
* No need to ask LSM for more than the first datagram.
*/
if (MSG_CMSG_COMPAT & flags) {
err = ___sys_recvmsg(sock, (struct user_msghdr __user *)compat_entry,
&msg_sys, flags & ~MSG_WAITFORONE,
datagrams);
if (err < 0)
break;
err = __put_user(err, &compat_entry->msg_len);
++compat_entry;
} else {
err = ___sys_recvmsg(sock,
(struct user_msghdr __user *)entry,
&msg_sys, flags & ~MSG_WAITFORONE,
datagrams);
if (err < 0)
break;
err = put_user(err, &entry->msg_len);
++entry;
}
if (err)
break;
++datagrams;
/* MSG_WAITFORONE turns on MSG_DONTWAIT after one packet */
if (flags & MSG_WAITFORONE)
flags |= MSG_DONTWAIT;
if (timeout) {
ktime_get_ts64(&timeout64);
*timeout = timespec64_sub(end_time, timeout64);
if (timeout->tv_sec < 0) {
timeout->tv_sec = timeout->tv_nsec = 0;
break;
}
/* Timeout, return less than vlen datagrams */
if (timeout->tv_nsec == 0 && timeout->tv_sec == 0)
break;
}
/* Out of band data, return right away */
if (msg_sys.msg_flags & MSG_OOB)
break;
cond_resched();
}
if (err == 0)
goto out_put;
if (datagrams == 0) {
datagrams = err;
goto out_put;
}
/*
 * We may return fewer entries than requested (vlen) if the
 * socket is non-blocking and there aren't enough datagrams...
*/
if (err != -EAGAIN) {
/*
* ... or if recvmsg returns an error after we
* received some datagrams, where we record the
* error to return on the next call or if the
* app asks about it using getsockopt(SO_ERROR).
*/
WRITE_ONCE(sock->sk->sk_err, -err);
}
out_put:
fput_light(sock->file, fput_needed);
return datagrams;
}
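/* Worked example of the timeout handling above (illustrative): with a 2s
 * timeout and the first datagram arriving after 0.5s, *timeout is rewritten
 * to the ~1.5s remaining (and copied back to user space by __sys_recvmmsg());
 * once it reaches zero the loop stops and fewer than vlen datagrams are
 * returned. Note the remaining time is only re-evaluated after a datagram
 * arrives, not while blocked waiting for one.
 */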
int __sys_recvmmsg(int fd, struct mmsghdr __user *mmsg,
unsigned int vlen, unsigned int flags,
struct __kernel_timespec __user *timeout,
struct old_timespec32 __user *timeout32)
{
int datagrams;
struct timespec64 timeout_sys;
if (timeout && get_timespec64(&timeout_sys, timeout))
return -EFAULT;
if (timeout32 && get_old_timespec32(&timeout_sys, timeout32))
return -EFAULT;
if (!timeout && !timeout32)
return do_recvmmsg(fd, mmsg, vlen, flags, NULL);
datagrams = do_recvmmsg(fd, mmsg, vlen, flags, &timeout_sys);
if (datagrams <= 0)
return datagrams;
if (timeout && put_timespec64(&timeout_sys, timeout))
datagrams = -EFAULT;
if (timeout32 && put_old_timespec32(&timeout_sys, timeout32))
datagrams = -EFAULT;
return datagrams;
}
SYSCALL_DEFINE5(recvmmsg, int, fd, struct mmsghdr __user *, mmsg,
unsigned int, vlen, unsigned int, flags,
struct __kernel_timespec __user *, timeout)
{
if (flags & MSG_CMSG_COMPAT)
return -EINVAL;
return __sys_recvmmsg(fd, mmsg, vlen, flags, timeout, NULL);
}
#ifdef CONFIG_COMPAT_32BIT_TIME
SYSCALL_DEFINE5(recvmmsg_time32, int, fd, struct mmsghdr __user *, mmsg,
unsigned int, vlen, unsigned int, flags,
struct old_timespec32 __user *, timeout)
{
if (flags & MSG_CMSG_COMPAT)
return -EINVAL;
return __sys_recvmmsg(fd, mmsg, vlen, flags, NULL, timeout);
}
#endif
#ifdef __ARCH_WANT_SYS_SOCKETCALL
/* Argument list sizes for sys_socketcall */
#define AL(x) ((x) * sizeof(unsigned long))
static const unsigned char nargs[21] = {
AL(0), AL(3), AL(3), AL(3), AL(2), AL(3),
AL(3), AL(3), AL(4), AL(4), AL(4), AL(6),
AL(6), AL(2), AL(5), AL(5), AL(3), AL(3),
AL(4), AL(5), AL(4)
};
#undef AL
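/* Example reading of the table above (illustrative): SYS_BIND is call
 * number 2, so nargs[2] == AL(3) == 3 * sizeof(unsigned long), and
 * sys_socketcall() copies exactly three argument words (fd, umyaddr,
 * addrlen) from user space before dispatching to __sys_bind(). Call
 * number 0 is unused, hence the leading AL(0).
 */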
/*
* System call vectors.
*
* Argument checking cleaned up. Saved 20% in size.
* This function doesn't need to set the kernel lock because
* it is set by the callees.
*/
SYSCALL_DEFINE2(socketcall, int, call, unsigned long __user *, args)
{
unsigned long a[AUDITSC_ARGS];
unsigned long a0, a1;
int err;
unsigned int len;
if (call < 1 || call > SYS_SENDMMSG)
return -EINVAL;
call = array_index_nospec(call, SYS_SENDMMSG + 1);
len = nargs[call];
if (len > sizeof(a))
return -EINVAL;
/* copy_from_user should be SMP safe. */
if (copy_from_user(a, args, len))
return -EFAULT;
err = audit_socketcall(nargs[call] / sizeof(unsigned long), a);
if (err)
return err;
a0 = a[0];
a1 = a[1];
switch (call) {
case SYS_SOCKET:
err = __sys_socket(a0, a1, a[2]);
break;
case SYS_BIND:
err = __sys_bind(a0, (struct sockaddr __user *)a1, a[2]);
break;
case SYS_CONNECT:
err = __sys_connect(a0, (struct sockaddr __user *)a1, a[2]);
break;
case SYS_LISTEN:
err = __sys_listen(a0, a1);
break;
case SYS_ACCEPT:
err = __sys_accept4(a0, (struct sockaddr __user *)a1,
(int __user *)a[2], 0);
break;
case SYS_GETSOCKNAME:
err =
__sys_getsockname(a0, (struct sockaddr __user *)a1,
(int __user *)a[2]);
break;
case SYS_GETPEERNAME:
err =
__sys_getpeername(a0, (struct sockaddr __user *)a1,
(int __user *)a[2]);
break;
case SYS_SOCKETPAIR:
err = __sys_socketpair(a0, a1, a[2], (int __user *)a[3]);
break;
case SYS_SEND:
err = __sys_sendto(a0, (void __user *)a1, a[2], a[3],
NULL, 0);
break;
case SYS_SENDTO:
err = __sys_sendto(a0, (void __user *)a1, a[2], a[3],
(struct sockaddr __user *)a[4], a[5]);
break;
case SYS_RECV:
err = __sys_recvfrom(a0, (void __user *)a1, a[2], a[3],
NULL, NULL);
break;
case SYS_RECVFROM:
err = __sys_recvfrom(a0, (void __user *)a1, a[2], a[3],
(struct sockaddr __user *)a[4],
(int __user *)a[5]);
break;
case SYS_SHUTDOWN:
err = __sys_shutdown(a0, a1);
break;
case SYS_SETSOCKOPT:
err = __sys_setsockopt(a0, a1, a[2], (char __user *)a[3],
a[4]);
break;
case SYS_GETSOCKOPT:
err =
__sys_getsockopt(a0, a1, a[2], (char __user *)a[3],
(int __user *)a[4]);
break;
case SYS_SENDMSG:
err = __sys_sendmsg(a0, (struct user_msghdr __user *)a1,
a[2], true);
break;
case SYS_SENDMMSG:
err = __sys_sendmmsg(a0, (struct mmsghdr __user *)a1, a[2],
a[3], true);
break;
case SYS_RECVMSG:
err = __sys_recvmsg(a0, (struct user_msghdr __user *)a1,
a[2], true);
break;
case SYS_RECVMMSG:
if (IS_ENABLED(CONFIG_64BIT))
err = __sys_recvmmsg(a0, (struct mmsghdr __user *)a1,
a[2], a[3],
(struct __kernel_timespec __user *)a[4],
NULL);
else
err = __sys_recvmmsg(a0, (struct mmsghdr __user *)a1,
a[2], a[3], NULL,
(struct old_timespec32 __user *)a[4]);
break;
case SYS_ACCEPT4:
err = __sys_accept4(a0, (struct sockaddr __user *)a1,
(int __user *)a[2], a[3]);
break;
default:
err = -EINVAL;
break;
}
return err;
}
#endif /* __ARCH_WANT_SYS_SOCKETCALL */
/**
* sock_register - add a socket protocol handler
* @ops: description of protocol
*
* This function is called by a protocol handler that wants to
* advertise its address family, and have it linked into the
* socket interface. The value ops->family corresponds to the
* socket system call protocol family.
*/
int sock_register(const struct net_proto_family *ops)
{
int err;
if (ops->family >= NPROTO) {
pr_crit("protocol %d >= NPROTO(%d)\n", ops->family, NPROTO);
return -ENOBUFS;
}
spin_lock(&net_family_lock);
if (rcu_dereference_protected(net_families[ops->family],
lockdep_is_held(&net_family_lock)))
err = -EEXIST;
else {
rcu_assign_pointer(net_families[ops->family], ops);
err = 0;
}
spin_unlock(&net_family_lock);
pr_info("NET: Registered %s protocol family\n", pf_family_names[ops->family]);
return err;
}
EXPORT_SYMBOL(sock_register);
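/* Usage sketch (hedged, not from this file): a protocol module typically
 * registers its family on init and removes it on exit. AF_MYPROTO,
 * my_create and my_family_ops are hypothetical names:
 *
 *	static const struct net_proto_family my_family_ops = {
 *		.family	= AF_MYPROTO,
 *		.create	= my_create,
 *		.owner	= THIS_MODULE,
 *	};
 *
 *	sock_register(&my_family_ops);	// in module init
 *	sock_unregister(AF_MYPROTO);	// in module exit
 */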
/**
* sock_unregister - remove a protocol handler
* @family: protocol family to remove
*
* This function is called by a protocol handler that wants to
 * remove its address family, so that it is no longer used for
 * new socket creation.
*
* If protocol handler is a module, then it can use module reference
* counts to protect against new references. If protocol handler is not
* a module then it needs to provide its own protection in
* the ops->create routine.
*/
void sock_unregister(int family)
{
BUG_ON(family < 0 || family >= NPROTO);
spin_lock(&net_family_lock);
RCU_INIT_POINTER(net_families[family], NULL);
spin_unlock(&net_family_lock);
synchronize_rcu();
pr_info("NET: Unregistered %s protocol family\n", pf_family_names[family]);
}
EXPORT_SYMBOL(sock_unregister);
bool sock_is_registered(int family)
{
return family < NPROTO && rcu_access_pointer(net_families[family]);
}
static int __init sock_init(void)
{
int err;
/*
* Initialize the network sysctl infrastructure.
*/
err = net_sysctl_init();
if (err)
goto out;
/*
* Initialize skbuff SLAB cache
*/
skb_init();
/*
* Initialize the protocols module.
*/
init_inodecache();
err = register_filesystem(&sock_fs_type);
if (err)
goto out;
sock_mnt = kern_mount(&sock_fs_type);
if (IS_ERR(sock_mnt)) {
err = PTR_ERR(sock_mnt);
goto out_mount;
}
/* The real protocol initialization is performed in later initcalls.
*/
#ifdef CONFIG_NETFILTER
err = netfilter_init();
if (err)
goto out;
#endif
ptp_classifier_init();
out:
return err;
out_mount:
unregister_filesystem(&sock_fs_type);
goto out;
}
core_initcall(sock_init); /* early initcall */
#ifdef CONFIG_PROC_FS
void socket_seq_show(struct seq_file *seq)
{
seq_printf(seq, "sockets: used %d\n",
sock_inuse_get(seq->private));
}
#endif /* CONFIG_PROC_FS */
/* Handle the fact that while struct ifreq has the same *layout* on
* 32/64 for everything but ifreq::ifru_ifmap and ifreq::ifru_data,
* which are handled elsewhere, it still has different *size* due to
* ifreq::ifru_ifmap (which is 16 bytes on 32 bit, 24 bytes on 64-bit,
* resulting in struct ifreq being 32 and 40 bytes respectively).
* As a result, if the struct happens to be at the end of a page and
* the next page isn't readable/writable, we get a fault. To prevent
* that, copy back and forth to the full size.
*/
int get_user_ifreq(struct ifreq *ifr, void __user **ifrdata, void __user *arg)
{
if (in_compat_syscall()) {
struct compat_ifreq *ifr32 = (struct compat_ifreq *)ifr;
memset(ifr, 0, sizeof(*ifr));
if (copy_from_user(ifr32, arg, sizeof(*ifr32)))
return -EFAULT;
if (ifrdata)
*ifrdata = compat_ptr(ifr32->ifr_data);
return 0;
}
if (copy_from_user(ifr, arg, sizeof(*ifr)))
return -EFAULT;
if (ifrdata)
*ifrdata = ifr->ifr_data;
return 0;
}
EXPORT_SYMBOL(get_user_ifreq);
int put_user_ifreq(struct ifreq *ifr, void __user *arg)
{
size_t size = sizeof(*ifr);
if (in_compat_syscall())
size = sizeof(struct compat_ifreq);
if (copy_to_user(arg, ifr, size))
return -EFAULT;
return 0;
}
EXPORT_SYMBOL(put_user_ifreq);
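/* Size arithmetic behind the comment above (illustrative): on 64-bit,
 * struct ifreq is IFNAMSIZ (16) + 24 bytes of ifru_ifmap == 40 bytes,
 * while struct compat_ifreq is 16 + 16 == 32 bytes. A 32-bit caller thus
 * supplies 8 bytes less than sizeof(struct ifreq), which is why these
 * helpers copy only the compat size in that case instead of risking a
 * fault past the end of the user buffer.
 */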
#ifdef CONFIG_COMPAT
static int compat_siocwandev(struct net *net, struct compat_ifreq __user *uifr32)
{
compat_uptr_t uptr32;
struct ifreq ifr;
void __user *saved;
int err;
if (get_user_ifreq(&ifr, NULL, uifr32))
return -EFAULT;
if (get_user(uptr32, &uifr32->ifr_settings.ifs_ifsu))
return -EFAULT;
saved = ifr.ifr_settings.ifs_ifsu.raw_hdlc;
ifr.ifr_settings.ifs_ifsu.raw_hdlc = compat_ptr(uptr32);
err = dev_ioctl(net, SIOCWANDEV, &ifr, NULL, NULL);
if (!err) {
ifr.ifr_settings.ifs_ifsu.raw_hdlc = saved;
if (put_user_ifreq(&ifr, uifr32))
err = -EFAULT;
}
return err;
}
/* Handle ioctls that use ifreq::ifr_data and just need struct ifreq converted */
static int compat_ifr_data_ioctl(struct net *net, unsigned int cmd,
struct compat_ifreq __user *u_ifreq32)
{
struct ifreq ifreq;
void __user *data;
if (!is_socket_ioctl_cmd(cmd))
return -ENOTTY;
if (get_user_ifreq(&ifreq, &data, u_ifreq32))
return -EFAULT;
ifreq.ifr_data = data;
return dev_ioctl(net, cmd, &ifreq, data, NULL);
}
static int compat_sock_ioctl_trans(struct file *file, struct socket *sock,
unsigned int cmd, unsigned long arg)
{
void __user *argp = compat_ptr(arg);
struct sock *sk = sock->sk;
struct net *net = sock_net(sk);
const struct proto_ops *ops;
if (cmd >= SIOCDEVPRIVATE && cmd <= (SIOCDEVPRIVATE + 15))
return sock_ioctl(file, cmd, (unsigned long)argp);
switch (cmd) {
case SIOCWANDEV:
return compat_siocwandev(net, argp);
case SIOCGSTAMP_OLD:
case SIOCGSTAMPNS_OLD:
ops = READ_ONCE(sock->ops);
if (!ops->gettstamp)
return -ENOIOCTLCMD;
return ops->gettstamp(sock, argp, cmd == SIOCGSTAMP_OLD,
!COMPAT_USE_64BIT_TIME);
case SIOCETHTOOL:
case SIOCBONDSLAVEINFOQUERY:
case SIOCBONDINFOQUERY:
case SIOCSHWTSTAMP:
case SIOCGHWTSTAMP:
return compat_ifr_data_ioctl(net, cmd, argp);
case FIOSETOWN:
case SIOCSPGRP:
case FIOGETOWN:
case SIOCGPGRP:
case SIOCBRADDBR:
case SIOCBRDELBR:
case SIOCGIFVLAN:
case SIOCSIFVLAN:
case SIOCGSKNS:
case SIOCGSTAMP_NEW:
case SIOCGSTAMPNS_NEW:
case SIOCGIFCONF:
case SIOCSIFBR:
case SIOCGIFBR:
return sock_ioctl(file, cmd, arg);
case SIOCGIFFLAGS:
case SIOCSIFFLAGS:
case SIOCGIFMAP:
case SIOCSIFMAP:
case SIOCGIFMETRIC:
case SIOCSIFMETRIC:
case SIOCGIFMTU:
case SIOCSIFMTU:
case SIOCGIFMEM:
case SIOCSIFMEM:
case SIOCGIFHWADDR:
case SIOCSIFHWADDR:
case SIOCADDMULTI:
case SIOCDELMULTI:
case SIOCGIFINDEX:
case SIOCGIFADDR:
case SIOCSIFADDR:
case SIOCSIFHWBROADCAST:
case SIOCDIFADDR:
case SIOCGIFBRDADDR:
case SIOCSIFBRDADDR:
case SIOCGIFDSTADDR:
case SIOCSIFDSTADDR:
case SIOCGIFNETMASK:
case SIOCSIFNETMASK:
case SIOCSIFPFLAGS:
case SIOCGIFPFLAGS:
case SIOCGIFTXQLEN:
case SIOCSIFTXQLEN:
case SIOCBRADDIF:
case SIOCBRDELIF:
case SIOCGIFNAME:
case SIOCSIFNAME:
case SIOCGMIIPHY:
case SIOCGMIIREG:
case SIOCSMIIREG:
case SIOCBONDENSLAVE:
case SIOCBONDRELEASE:
case SIOCBONDSETHWADDR:
case SIOCBONDCHANGEACTIVE:
case SIOCSARP:
case SIOCGARP:
case SIOCDARP:
case SIOCOUTQ:
case SIOCOUTQNSD:
case SIOCATMARK:
return sock_do_ioctl(net, sock, cmd, arg);
}
return -ENOIOCTLCMD;
}
static long compat_sock_ioctl(struct file *file, unsigned int cmd,
unsigned long arg)
{
struct socket *sock = file->private_data;
const struct proto_ops *ops = READ_ONCE(sock->ops);
int ret = -ENOIOCTLCMD;
struct sock *sk;
struct net *net;
sk = sock->sk;
net = sock_net(sk);
if (ops->compat_ioctl)
ret = ops->compat_ioctl(sock, cmd, arg);
if (ret == -ENOIOCTLCMD &&
(cmd >= SIOCIWFIRST && cmd <= SIOCIWLAST))
ret = compat_wext_handle_ioctl(net, cmd, arg);
if (ret == -ENOIOCTLCMD)
ret = compat_sock_ioctl_trans(file, sock, cmd, arg);
return ret;
}
#endif
/**
* kernel_bind - bind an address to a socket (kernel space)
* @sock: socket
* @addr: address
* @addrlen: length of address
*
* Returns 0 or an error.
*/
int kernel_bind(struct socket *sock, struct sockaddr *addr, int addrlen)
{
return READ_ONCE(sock->ops)->bind(sock, addr, addrlen);
}
EXPORT_SYMBOL(kernel_bind);
/**
* kernel_listen - move socket to listening state (kernel space)
* @sock: socket
* @backlog: pending connections queue size
*
* Returns 0 or an error.
*/
int kernel_listen(struct socket *sock, int backlog)
{
return READ_ONCE(sock->ops)->listen(sock, backlog);
}
EXPORT_SYMBOL(kernel_listen);
/**
* kernel_accept - accept a connection (kernel space)
* @sock: listening socket
* @newsock: new connected socket
* @flags: flags
*
* @flags must be SOCK_CLOEXEC, SOCK_NONBLOCK or 0.
* If it fails, @newsock is guaranteed to be %NULL.
* Returns 0 or an error.
*/
int kernel_accept(struct socket *sock, struct socket **newsock, int flags)
{
struct sock *sk = sock->sk;
const struct proto_ops *ops = READ_ONCE(sock->ops);
int err;
err = sock_create_lite(sk->sk_family, sk->sk_type, sk->sk_protocol,
newsock);
if (err < 0)
goto done;
err = ops->accept(sock, *newsock, flags, true);
if (err < 0) {
sock_release(*newsock);
*newsock = NULL;
goto done;
}
(*newsock)->ops = ops;
__module_get(ops->owner);
done:
return err;
}
EXPORT_SYMBOL(kernel_accept);
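/* Usage sketch for the kernel_* helpers (illustrative, assuming a
 * previously created listening socket 'lsock'):
 *
 *	struct socket *newsock;
 *	int err = kernel_accept(lsock, &newsock, SOCK_NONBLOCK);
 *	if (!err)
 *		sock_release(newsock);	// caller owns the new socket
 */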
/**
* kernel_connect - connect a socket (kernel space)
* @sock: socket
* @addr: address
* @addrlen: address length
* @flags: flags (O_NONBLOCK, ...)
*
* For datagram sockets, @addr is the address to which datagrams are sent
* by default, and the only address from which datagrams are received.
* For stream sockets, attempts to connect to @addr.
* Returns 0 or an error code.
*/
int kernel_connect(struct socket *sock, struct sockaddr *addr, int addrlen,
int flags)
{
struct sockaddr_storage address;
memcpy(&address, addr, addrlen);
return READ_ONCE(sock->ops)->connect(sock, (struct sockaddr *)&address,
addrlen, flags);
}
EXPORT_SYMBOL(kernel_connect);
/**
* kernel_getsockname - get the address which the socket is bound (kernel space)
* @sock: socket
* @addr: address holder
*
* Fills the @addr pointer with the address which the socket is bound.
* Returns the length of the address in bytes or an error code.
*/
int kernel_getsockname(struct socket *sock, struct sockaddr *addr)
{
return READ_ONCE(sock->ops)->getname(sock, addr, 0);
}
EXPORT_SYMBOL(kernel_getsockname);
/**
* kernel_getpeername - get the address which the socket is connected (kernel space)
* @sock: socket
* @addr: address holder
*
* Fills the @addr pointer with the address which the socket is connected.
* Returns the length of the address in bytes or an error code.
*/
int kernel_getpeername(struct socket *sock, struct sockaddr *addr)
{
return READ_ONCE(sock->ops)->getname(sock, addr, 1);
}
EXPORT_SYMBOL(kernel_getpeername);
/**
* kernel_sock_shutdown - shut down part of a full-duplex connection (kernel space)
* @sock: socket
* @how: connection part
*
* Returns 0 or an error.
*/
int kernel_sock_shutdown(struct socket *sock, enum sock_shutdown_cmd how)
{
return READ_ONCE(sock->ops)->shutdown(sock, how);
}
EXPORT_SYMBOL(kernel_sock_shutdown);
/**
* kernel_sock_ip_overhead - returns the IP overhead imposed by a socket
* @sk: socket
*
 * This routine returns the IP overhead imposed by a socket, i.e.
 * the length of the underlying IP header (IPv4 or IPv6, depending
 * on the socket family) plus the length of any IP options enabled
 * on the socket. Assumes that the caller holds a lock on the socket.
*/
u32 kernel_sock_ip_overhead(struct sock *sk)
{
struct inet_sock *inet;
struct ip_options_rcu *opt;
u32 overhead = 0;
#if IS_ENABLED(CONFIG_IPV6)
struct ipv6_pinfo *np;
struct ipv6_txoptions *optv6 = NULL;
#endif /* IS_ENABLED(CONFIG_IPV6) */
if (!sk)
return overhead;
switch (sk->sk_family) {
case AF_INET:
inet = inet_sk(sk);
overhead += sizeof(struct iphdr);
opt = rcu_dereference_protected(inet->inet_opt,
sock_owned_by_user(sk));
if (opt)
overhead += opt->opt.optlen;
return overhead;
#if IS_ENABLED(CONFIG_IPV6)
case AF_INET6:
np = inet6_sk(sk);
overhead += sizeof(struct ipv6hdr);
if (np)
optv6 = rcu_dereference_protected(np->opt,
sock_owned_by_user(sk));
if (optv6)
overhead += (optv6->opt_flen + optv6->opt_nflen);
return overhead;
#endif /* IS_ENABLED(CONFIG_IPV6) */
default: /* Returns 0 overhead if the socket is not ipv4 or ipv6 */
return overhead;
}
}
EXPORT_SYMBOL(kernel_sock_ip_overhead);
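/* Worked example (illustrative): a plain AF_INET socket with no IP options
 * yields sizeof(struct iphdr) == 20 bytes; an AF_INET6 socket with no
 * extension headers yields sizeof(struct ipv6hdr) == 40 bytes. Options and
 * extension headers add their encoded lengths on top of that.
 */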
| linux-master | net/socket.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* This file contains all networking devres helpers.
*/
#include <linux/device.h>
#include <linux/etherdevice.h>
#include <linux/netdevice.h>
struct net_device_devres {
struct net_device *ndev;
};
static void devm_free_netdev(struct device *dev, void *this)
{
struct net_device_devres *res = this;
free_netdev(res->ndev);
}
struct net_device *devm_alloc_etherdev_mqs(struct device *dev, int sizeof_priv,
unsigned int txqs, unsigned int rxqs)
{
struct net_device_devres *dr;
dr = devres_alloc(devm_free_netdev, sizeof(*dr), GFP_KERNEL);
if (!dr)
return NULL;
dr->ndev = alloc_etherdev_mqs(sizeof_priv, txqs, rxqs);
if (!dr->ndev) {
devres_free(dr);
return NULL;
}
devres_add(dev, dr);
return dr->ndev;
}
EXPORT_SYMBOL(devm_alloc_etherdev_mqs);
static void devm_unregister_netdev(struct device *dev, void *this)
{
struct net_device_devres *res = this;
unregister_netdev(res->ndev);
}
static int netdev_devres_match(struct device *dev, void *this, void *match_data)
{
struct net_device_devres *res = this;
struct net_device *ndev = match_data;
return ndev == res->ndev;
}
/**
* devm_register_netdev - resource managed variant of register_netdev()
* @dev: managing device for this netdev - usually the parent device
* @ndev: device to register
*
* This is a devres variant of register_netdev() for which the unregister
* function will be called automatically when the managing device is
* detached. Note: the net_device used must also be resource managed by
* the same struct device.
*/
int devm_register_netdev(struct device *dev, struct net_device *ndev)
{
struct net_device_devres *dr;
int ret;
/* struct net_device must itself be managed. For now a managed netdev
* can only be allocated by devm_alloc_etherdev_mqs() so the check is
* straightforward.
*/
if (WARN_ON(!devres_find(dev, devm_free_netdev,
netdev_devres_match, ndev)))
return -EINVAL;
dr = devres_alloc(devm_unregister_netdev, sizeof(*dr), GFP_KERNEL);
if (!dr)
return -ENOMEM;
ret = register_netdev(ndev);
if (ret) {
devres_free(dr);
return ret;
}
dr->ndev = ndev;
devres_add(ndev->dev.parent, dr);
return 0;
}
EXPORT_SYMBOL(devm_register_netdev);
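/* Usage sketch (hedged, not from this file): a typical driver ->probe()
 * pairs the two devres helpers so that unregistration and freeing both
 * happen automatically on detach. 'struct my_priv' is a hypothetical
 * driver private structure:
 *
 *	ndev = devm_alloc_etherdev_mqs(dev, sizeof(struct my_priv), 1, 1);
 *	if (!ndev)
 *		return -ENOMEM;
 *	// ... initialize ndev ...
 *	return devm_register_netdev(dev, ndev);
 */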
| linux-master | net/devres.c |
// SPDX-License-Identifier: GPL-2.0-only
/* -*- linux-c -*-
* sysctl_net.c: sysctl interface to net subsystem.
*
* Begun April 1, 1996, Mike Shaver.
* Added /proc/sys/net directories for each protocol family. [MS]
*
* Revision 1.2 1996/05/08 20:24:40 shaver
* Added bits for NET_BRIDGE and the NET_IPV4_ARP stuff and
* NET_IPV4_IP_FORWARD.
*
*
*/
#include <linux/mm.h>
#include <linux/export.h>
#include <linux/sysctl.h>
#include <linux/nsproxy.h>
#include <net/sock.h>
#ifdef CONFIG_INET
#include <net/ip.h>
#endif
#ifdef CONFIG_NET
#include <linux/if_ether.h>
#endif
static struct ctl_table_set *
net_ctl_header_lookup(struct ctl_table_root *root)
{
return &current->nsproxy->net_ns->sysctls;
}
static int is_seen(struct ctl_table_set *set)
{
return &current->nsproxy->net_ns->sysctls == set;
}
/* Return standard mode bits for table entry. */
static int net_ctl_permissions(struct ctl_table_header *head,
struct ctl_table *table)
{
struct net *net = container_of(head->set, struct net, sysctls);
/* Allow network administrator to have same access as root. */
if (ns_capable_noaudit(net->user_ns, CAP_NET_ADMIN)) {
int mode = (table->mode >> 6) & 7;
return (mode << 6) | (mode << 3) | mode;
}
return table->mode;
}
static void net_ctl_set_ownership(struct ctl_table_header *head,
struct ctl_table *table,
kuid_t *uid, kgid_t *gid)
{
struct net *net = container_of(head->set, struct net, sysctls);
kuid_t ns_root_uid;
kgid_t ns_root_gid;
ns_root_uid = make_kuid(net->user_ns, 0);
if (uid_valid(ns_root_uid))
*uid = ns_root_uid;
ns_root_gid = make_kgid(net->user_ns, 0);
if (gid_valid(ns_root_gid))
*gid = ns_root_gid;
}
static struct ctl_table_root net_sysctl_root = {
.lookup = net_ctl_header_lookup,
.permissions = net_ctl_permissions,
.set_ownership = net_ctl_set_ownership,
};
static int __net_init sysctl_net_init(struct net *net)
{
setup_sysctl_set(&net->sysctls, &net_sysctl_root, is_seen);
return 0;
}
static void __net_exit sysctl_net_exit(struct net *net)
{
retire_sysctl_set(&net->sysctls);
}
static struct pernet_operations sysctl_pernet_ops = {
.init = sysctl_net_init,
.exit = sysctl_net_exit,
};
static struct ctl_table_header *net_header;
__init int net_sysctl_init(void)
{
static struct ctl_table empty[1];
int ret = -ENOMEM;
/* Avoid limitations in the sysctl implementation by
* registering "/proc/sys/net" as an empty directory not in a
* network namespace.
*/
net_header = register_sysctl_sz("net", empty, 0);
if (!net_header)
goto out;
ret = register_pernet_subsys(&sysctl_pernet_ops);
if (ret)
goto out1;
out:
return ret;
out1:
unregister_sysctl_table(net_header);
net_header = NULL;
goto out;
}
/* Verify that sysctls for non-init netns are safe by either:
* 1) being read-only, or
* 2) having a data pointer which points outside of the global kernel/module
* data segment, and rather into the heap where a per-net object was
* allocated.
*/
static void ensure_safe_net_sysctl(struct net *net, const char *path,
struct ctl_table *table, size_t table_size)
{
struct ctl_table *ent;
pr_debug("Registering net sysctl (net %p): %s\n", net, path);
ent = table;
for (size_t i = 0; i < table_size && ent->procname; ent++, i++) {
unsigned long addr;
const char *where;
pr_debug(" procname=%s mode=%o proc_handler=%ps data=%p\n",
ent->procname, ent->mode, ent->proc_handler, ent->data);
/* If it's not writable inside the netns, then it can't hurt. */
if ((ent->mode & 0222) == 0) {
pr_debug(" Not writable by anyone\n");
continue;
}
/* Where does data point? */
addr = (unsigned long)ent->data;
if (is_module_address(addr))
where = "module";
else if (is_kernel_core_data(addr))
where = "kernel";
else
continue;
/* If it is writable and points to kernel/module global
* data, then it's probably a netns leak.
*/
WARN(1, "sysctl %s/%s: data points to %s global data: %ps\n",
path, ent->procname, where, ent->data);
/* Make it "safe" by dropping writable perms */
ent->mode &= ~0222;
}
}
struct ctl_table_header *register_net_sysctl_sz(struct net *net,
const char *path,
struct ctl_table *table,
size_t table_size)
{
int count;
struct ctl_table *entry;
if (!net_eq(net, &init_net))
ensure_safe_net_sysctl(net, path, table, table_size);
entry = table;
for (count = 0 ; count < table_size && entry->procname; entry++, count++)
;
return __register_sysctl_table(&net->sysctls, path, table, count);
}
EXPORT_SYMBOL_GPL(register_net_sysctl_sz);
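/* Usage sketch (illustrative): a per-netns subsystem typically duplicates a
 * template table, points each entry's ->data at per-net storage and then
 * registers it. 'template' and 'net->my_subsys.some_value' are hypothetical:
 *
 *	tbl = kmemdup(template, sizeof(template), GFP_KERNEL);
 *	tbl[0].data = &net->my_subsys.some_value;
 *	hdr = register_net_sysctl_sz(net, "net/mysubsys", tbl,
 *				     ARRAY_SIZE(template));
 *
 * Pointing ->data at per-net heap storage is exactly what the writability
 * check above is policing.
 */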
void unregister_net_sysctl_table(struct ctl_table_header *header)
{
unregister_sysctl_table(header);
}
EXPORT_SYMBOL_GPL(unregister_net_sysctl_table);
| linux-master | net/sysctl_net.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
*
* Copyright (C) 1996 Mike Shaver ([email protected])
*/
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/sysctl.h>
#include <linux/spinlock.h>
#include <net/ax25.h>
static int min_ipdefmode[1], max_ipdefmode[] = {1};
static int min_axdefmode[1], max_axdefmode[] = {1};
static int min_backoff[1], max_backoff[] = {2};
static int min_conmode[1], max_conmode[] = {2};
static int min_window[] = {1}, max_window[] = {7};
static int min_ewindow[] = {1}, max_ewindow[] = {63};
static int min_t1[] = {1}, max_t1[] = {30000};
static int min_t2[] = {1}, max_t2[] = {20000};
static int min_t3[1], max_t3[] = {3600000};
static int min_idle[1], max_idle[] = {65535000};
static int min_n2[] = {1}, max_n2[] = {31};
static int min_paclen[] = {1}, max_paclen[] = {512};
static int min_proto[1], max_proto[] = { AX25_PROTO_MAX };
#ifdef CONFIG_AX25_DAMA_SLAVE
static int min_ds_timeout[1], max_ds_timeout[] = {65535000};
#endif
static const struct ctl_table ax25_param_table[] = {
{
.procname = "ip_default_mode",
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec_minmax,
.extra1 = &min_ipdefmode,
.extra2 = &max_ipdefmode
},
{
.procname = "ax25_default_mode",
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec_minmax,
.extra1 = &min_axdefmode,
.extra2 = &max_axdefmode
},
{
.procname = "backoff_type",
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec_minmax,
.extra1 = &min_backoff,
.extra2 = &max_backoff
},
{
.procname = "connect_mode",
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec_minmax,
.extra1 = &min_conmode,
.extra2 = &max_conmode
},
{
.procname = "standard_window_size",
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec_minmax,
.extra1 = &min_window,
.extra2 = &max_window
},
{
.procname = "extended_window_size",
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec_minmax,
.extra1 = &min_ewindow,
.extra2 = &max_ewindow
},
{
.procname = "t1_timeout",
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec_minmax,
.extra1 = &min_t1,
.extra2 = &max_t1
},
{
.procname = "t2_timeout",
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec_minmax,
.extra1 = &min_t2,
.extra2 = &max_t2
},
{
.procname = "t3_timeout",
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec_minmax,
.extra1 = &min_t3,
.extra2 = &max_t3
},
{
.procname = "idle_timeout",
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec_minmax,
.extra1 = &min_idle,
.extra2 = &max_idle
},
{
.procname = "maximum_retry_count",
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec_minmax,
.extra1 = &min_n2,
.extra2 = &max_n2
},
{
.procname = "maximum_packet_length",
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec_minmax,
.extra1 = &min_paclen,
.extra2 = &max_paclen
},
{
.procname = "protocol",
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec_minmax,
.extra1 = &min_proto,
.extra2 = &max_proto
},
#ifdef CONFIG_AX25_DAMA_SLAVE
{
.procname = "dama_slave_timeout",
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec_minmax,
.extra1 = &min_ds_timeout,
.extra2 = &max_ds_timeout
},
#endif
{ } /* that's all, folks! */
};
int ax25_register_dev_sysctl(ax25_dev *ax25_dev)
{
char path[sizeof("net/ax25/") + IFNAMSIZ];
int k;
struct ctl_table *table;
table = kmemdup(ax25_param_table, sizeof(ax25_param_table), GFP_KERNEL);
if (!table)
return -ENOMEM;
for (k = 0; k < AX25_MAX_VALUES; k++)
table[k].data = &ax25_dev->values[k];
snprintf(path, sizeof(path), "net/ax25/%s", ax25_dev->dev->name);
ax25_dev->sysheader = register_net_sysctl_sz(&init_net, path, table,
ARRAY_SIZE(ax25_param_table));
if (!ax25_dev->sysheader) {
kfree(table);
return -ENOMEM;
}
return 0;
}
void ax25_unregister_dev_sysctl(ax25_dev *ax25_dev)
{
struct ctl_table_header *header = ax25_dev->sysheader;
struct ctl_table *table;
if (header) {
ax25_dev->sysheader = NULL;
table = header->ctl_table_arg;
unregister_net_sysctl_table(header);
kfree(table);
}
}
| linux-master | net/ax25/sysctl_net_ax25.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
*
* Copyright (C) Alan Cox GW4PTS ([email protected])
* Copyright (C) Jonathan Naylor G4KLX ([email protected])
* Copyright (C) Joerg Reuter DL1BKE ([email protected])
* Copyright (C) Hans-Joachim Hetscher DD8NE ([email protected])
*/
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/in.h>
#include <linux/kernel.h>
#include <linux/timer.h>
#include <linux/string.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/slab.h>
#include <net/ax25.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <net/sock.h>
#include <net/tcp_states.h>
#include <linux/uaccess.h>
#include <linux/fcntl.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
/*
* Given a fragment, queue it on the fragment queue and if the fragment
* is complete, send it back to ax25_rx_iframe.
*/
static int ax25_rx_fragment(ax25_cb *ax25, struct sk_buff *skb)
{
struct sk_buff *skbn, *skbo;
if (ax25->fragno != 0) {
if (!(*skb->data & AX25_SEG_FIRST)) {
if ((ax25->fragno - 1) == (*skb->data & AX25_SEG_REM)) {
/* Enqueue fragment */
ax25->fragno = *skb->data & AX25_SEG_REM;
skb_pull(skb, 1); /* skip fragno */
ax25->fraglen += skb->len;
skb_queue_tail(&ax25->frag_queue, skb);
/* Last fragment received ? */
if (ax25->fragno == 0) {
skbn = alloc_skb(AX25_MAX_HEADER_LEN +
ax25->fraglen,
GFP_ATOMIC);
if (!skbn) {
skb_queue_purge(&ax25->frag_queue);
return 1;
}
skb_reserve(skbn, AX25_MAX_HEADER_LEN);
skbn->dev = ax25->ax25_dev->dev;
skb_reset_network_header(skbn);
skb_reset_transport_header(skbn);
/* Copy data from the fragments */
while ((skbo = skb_dequeue(&ax25->frag_queue)) != NULL) {
skb_copy_from_linear_data(skbo,
skb_put(skbn, skbo->len),
skbo->len);
kfree_skb(skbo);
}
ax25->fraglen = 0;
if (ax25_rx_iframe(ax25, skbn) == 0)
kfree_skb(skbn);
}
return 1;
}
}
} else {
/* First fragment received */
if (*skb->data & AX25_SEG_FIRST) {
skb_queue_purge(&ax25->frag_queue);
ax25->fragno = *skb->data & AX25_SEG_REM;
skb_pull(skb, 1); /* skip fragno */
ax25->fraglen = skb->len;
skb_queue_tail(&ax25->frag_queue, skb);
return 1;
}
}
return 0;
}
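/* Worked example of the reassembly above (illustrative): a message split
 * into three segments arrives with AX25_SEG_REM values 2, 1 and 0. The
 * first frame also carries AX25_SEG_FIRST ("two more to follow"); every
 * later frame must carry exactly ax25->fragno - 1 in its AX25_SEG_REM
 * bits, and when the counter reaches zero the queued fragments are copied
 * into a single skb and handed back to ax25_rx_iframe().
 */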
/*
* This is where all valid I frames are sent to, to be dispatched to
* whichever protocol requires them.
*/
int ax25_rx_iframe(ax25_cb *ax25, struct sk_buff *skb)
{
int (*func)(struct sk_buff *, ax25_cb *);
unsigned char pid;
int queued = 0;
if (skb == NULL)
	return 0;
ax25_start_idletimer(ax25);
pid = *skb->data;
if (pid == AX25_P_IP) {
/* working around a TCP bug to keep additional listeners
* happy. TCP re-uses the buffer and destroys the original
* content.
*/
struct sk_buff *skbn = skb_copy(skb, GFP_ATOMIC);
if (skbn != NULL) {
kfree_skb(skb);
skb = skbn;
}
skb_pull(skb, 1); /* Remove PID */
skb->mac_header = skb->network_header;
skb_reset_network_header(skb);
skb->dev = ax25->ax25_dev->dev;
skb->pkt_type = PACKET_HOST;
skb->protocol = htons(ETH_P_IP);
netif_rx(skb);
return 1;
}
if (pid == AX25_P_SEGMENT) {
skb_pull(skb, 1); /* Remove PID */
return ax25_rx_fragment(ax25, skb);
}
if ((func = ax25_protocol_function(pid)) != NULL) {
skb_pull(skb, 1); /* Remove PID */
return (*func)(skb, ax25);
}
if (ax25->sk != NULL && ax25->ax25_dev->values[AX25_VALUES_CONMODE] == 2) {
if ((!ax25->pidincl && ax25->sk->sk_protocol == pid) ||
ax25->pidincl) {
if (sock_queue_rcv_skb(ax25->sk, skb) == 0)
queued = 1;
else
ax25->condition |= AX25_COND_OWN_RX_BUSY;
}
}
return queued;
}
/*
* Higher level upcall for a LAPB frame
*/
static int ax25_process_rx_frame(ax25_cb *ax25, struct sk_buff *skb, int type, int dama)
{
int queued = 0;
if (ax25->state == AX25_STATE_0)
return 0;
switch (ax25->ax25_dev->values[AX25_VALUES_PROTOCOL]) {
case AX25_PROTO_STD_SIMPLEX:
case AX25_PROTO_STD_DUPLEX:
queued = ax25_std_frame_in(ax25, skb, type);
break;
#ifdef CONFIG_AX25_DAMA_SLAVE
case AX25_PROTO_DAMA_SLAVE:
if (dama || ax25->ax25_dev->dama.slave)
queued = ax25_ds_frame_in(ax25, skb, type);
else
queued = ax25_std_frame_in(ax25, skb, type);
break;
#endif
}
return queued;
}
static int ax25_rcv(struct sk_buff *skb, struct net_device *dev,
const ax25_address *dev_addr, struct packet_type *ptype)
{
ax25_address src, dest, *next_digi = NULL;
int type = 0, mine = 0, dama;
struct sock *make, *sk;
ax25_digi dp, reverse_dp;
ax25_cb *ax25;
ax25_dev *ax25_dev;
/*
* Process the AX.25/LAPB frame.
*/
skb_reset_transport_header(skb);
if ((ax25_dev = ax25_dev_ax25dev(dev)) == NULL)
goto free;
/*
* Parse the address header.
*/
if (ax25_addr_parse(skb->data, skb->len, &src, &dest, &dp, &type, &dama) == NULL)
goto free;
/*
 * Ours perhaps?
*/
if (dp.lastrepeat + 1 < dp.ndigi) /* Not yet digipeated completely */
next_digi = &dp.calls[dp.lastrepeat + 1];
/*
* Pull of the AX.25 headers leaving the CTRL/PID bytes
*/
skb_pull(skb, ax25_addr_size(&dp));
/* For our port addresses ? */
if (ax25cmp(&dest, dev_addr) == 0 && dp.lastrepeat + 1 == dp.ndigi)
mine = 1;
/* Also match on any registered callsign from L3/4 */
if (!mine && ax25_listen_mine(&dest, dev) && dp.lastrepeat + 1 == dp.ndigi)
mine = 1;
/* UI frame - bypass LAPB processing */
if ((*skb->data & ~0x10) == AX25_UI && dp.lastrepeat + 1 == dp.ndigi) {
skb_set_transport_header(skb, 2); /* skip control and pid */
ax25_send_to_raw(&dest, skb, skb->data[1]);
if (!mine && ax25cmp(&dest, (ax25_address *)dev->broadcast) != 0)
goto free;
/* Now we are pointing at the pid byte */
switch (skb->data[1]) {
case AX25_P_IP:
skb_pull(skb, 2);	/* drop PID/CTRL */
skb_reset_transport_header(skb);
skb_reset_network_header(skb);
skb->dev = dev;
skb->pkt_type = PACKET_HOST;
skb->protocol = htons(ETH_P_IP);
netif_rx(skb);
break;
case AX25_P_ARP:
skb_pull(skb, 2);
skb_reset_transport_header(skb);
skb_reset_network_header(skb);
skb->dev = dev;
skb->pkt_type = PACKET_HOST;
skb->protocol = htons(ETH_P_ARP);
netif_rx(skb);
break;
case AX25_P_TEXT:
/* Now find a suitable dgram socket */
sk = ax25_get_socket(&dest, &src, SOCK_DGRAM);
if (sk != NULL) {
bh_lock_sock(sk);
if (atomic_read(&sk->sk_rmem_alloc) >=
sk->sk_rcvbuf) {
kfree_skb(skb);
} else {
/*
* Remove the control and PID.
*/
skb_pull(skb, 2);
if (sock_queue_rcv_skb(sk, skb) != 0)
kfree_skb(skb);
}
bh_unlock_sock(sk);
sock_put(sk);
} else {
kfree_skb(skb);
}
break;
default:
kfree_skb(skb); /* Will scan SOCK_AX25 RAW sockets */
break;
}
return 0;
}
/*
 *	Is connected mode supported on this device?
 *	If not, should we DM incoming frames (except DMs) or
 *	silently ignore them? For now we stay quiet.
*/
if (ax25_dev->values[AX25_VALUES_CONMODE] == 0)
goto free;
/* LAPB */
/* AX.25 state 1-4 */
ax25_digi_invert(&dp, &reverse_dp);
if ((ax25 = ax25_find_cb(&dest, &src, &reverse_dp, dev)) != NULL) {
/*
* Process the frame. If it is queued up internally it
* returns one otherwise we free it immediately. This
* routine itself wakes the user context layers so we do
* no further work
*/
if (ax25_process_rx_frame(ax25, skb, type, dama) == 0)
kfree_skb(skb);
ax25_cb_put(ax25);
return 0;
}
/* AX.25 state 0 (disconnected) */
/* a) received not a SABM(E) */
if ((*skb->data & ~AX25_PF) != AX25_SABM &&
(*skb->data & ~AX25_PF) != AX25_SABME) {
/*
* Never reply to a DM. Also ignore any connects for
* addresses that are not our interfaces and not a socket.
*/
if ((*skb->data & ~AX25_PF) != AX25_DM && mine)
ax25_return_dm(dev, &src, &dest, &dp);
goto free;
}
/* b) received SABM(E) */
if (dp.lastrepeat + 1 == dp.ndigi)
sk = ax25_find_listener(&dest, 0, dev, SOCK_SEQPACKET);
else
sk = ax25_find_listener(next_digi, 1, dev, SOCK_SEQPACKET);
if (sk != NULL) {
bh_lock_sock(sk);
if (sk_acceptq_is_full(sk) ||
(make = ax25_make_new(sk, ax25_dev)) == NULL) {
if (mine)
ax25_return_dm(dev, &src, &dest, &dp);
kfree_skb(skb);
bh_unlock_sock(sk);
sock_put(sk);
return 0;
}
ax25 = sk_to_ax25(make);
skb_set_owner_r(skb, make);
skb_queue_head(&sk->sk_receive_queue, skb);
make->sk_state = TCP_ESTABLISHED;
sk_acceptq_added(sk);
bh_unlock_sock(sk);
} else {
if (!mine)
goto free;
if ((ax25 = ax25_create_cb()) == NULL) {
ax25_return_dm(dev, &src, &dest, &dp);
goto free;
}
ax25_fillin_cb(ax25, ax25_dev);
}
ax25->source_addr = dest;
ax25->dest_addr = src;
/*
* Sort out any digipeated paths.
*/
if (dp.ndigi && !ax25->digipeat &&
(ax25->digipeat = kmalloc(sizeof(ax25_digi), GFP_ATOMIC)) == NULL) {
kfree_skb(skb);
ax25_destroy_socket(ax25);
if (sk)
sock_put(sk);
return 0;
}
if (dp.ndigi == 0) {
kfree(ax25->digipeat);
ax25->digipeat = NULL;
} else {
/* Reverse the source SABM's path */
memcpy(ax25->digipeat, &reverse_dp, sizeof(ax25_digi));
}
if ((*skb->data & ~AX25_PF) == AX25_SABME) {
ax25->modulus = AX25_EMODULUS;
ax25->window = ax25_dev->values[AX25_VALUES_EWINDOW];
} else {
ax25->modulus = AX25_MODULUS;
ax25->window = ax25_dev->values[AX25_VALUES_WINDOW];
}
ax25_send_control(ax25, AX25_UA, AX25_POLLON, AX25_RESPONSE);
#ifdef CONFIG_AX25_DAMA_SLAVE
if (dama && ax25->ax25_dev->values[AX25_VALUES_PROTOCOL] == AX25_PROTO_DAMA_SLAVE)
ax25_dama_on(ax25);
#endif
ax25->state = AX25_STATE_3;
ax25_cb_add(ax25);
ax25_start_heartbeat(ax25);
ax25_start_t3timer(ax25);
ax25_start_idletimer(ax25);
if (sk) {
if (!sock_flag(sk, SOCK_DEAD))
sk->sk_data_ready(sk);
sock_put(sk);
} else {
free:
kfree_skb(skb);
}
return 0;
}
/*
* Receive an AX.25 frame via a SLIP interface.
*/
int ax25_kiss_rcv(struct sk_buff *skb, struct net_device *dev,
struct packet_type *ptype, struct net_device *orig_dev)
{
skb_orphan(skb);
if (!net_eq(dev_net(dev), &init_net)) {
kfree_skb(skb);
return 0;
}
if ((*skb->data & 0x0F) != 0) {
kfree_skb(skb); /* Not a KISS data frame */
return 0;
}
skb_pull(skb, AX25_KISS_HEADER_LEN); /* Remove the KISS byte */
return ax25_rcv(skb, dev, (const ax25_address *)dev->dev_addr, ptype);
}
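/* Frame layout note (illustrative): a KISS data frame starts with one
 * command byte whose low nibble is zero, i.e.
 *
 *	0x00 | <AX.25 address field> | <control> [pid] [data ...]
 *
 * Frames with a non-zero low nibble (TXDELAY, persistence, slot time, ...)
 * are TNC configuration commands and are dropped above. Only the data
 * framing is sketched here; KISS byte stuffing happens at the serial layer.
 */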
| linux-master | net/ax25/ax25_in.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
*
* Copyright (C) Alan Cox GW4PTS ([email protected])
* Copyright (C) Jonathan Naylor G4KLX ([email protected])
* Copyright (C) Darryl Miles G7LED ([email protected])
* Copyright (C) Steven Whitehouse GW7RRM ([email protected])
* Copyright (C) Joerg Reuter DL1BKE ([email protected])
* Copyright (C) Hans-Joachim Hetscher DD8NE ([email protected])
* Copyright (C) Hans Alblas PE1AYX ([email protected])
* Copyright (C) Frederic Rible F1OAT ([email protected])
*/
#include <linux/capability.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/in.h>
#include <linux/kernel.h>
#include <linux/sched/signal.h>
#include <linux/timer.h>
#include <linux/string.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/slab.h>
#include <net/ax25.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/if_arp.h>
#include <linux/skbuff.h>
#include <net/sock.h>
#include <linux/uaccess.h>
#include <linux/fcntl.h>
#include <linux/termios.h> /* For TIOCINQ/OUTQ */
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/notifier.h>
#include <linux/proc_fs.h>
#include <linux/stat.h>
#include <linux/sysctl.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <net/net_namespace.h>
#include <net/tcp_states.h>
#include <net/ip.h>
#include <net/arp.h>
HLIST_HEAD(ax25_list);
DEFINE_SPINLOCK(ax25_list_lock);
static const struct proto_ops ax25_proto_ops;
static void ax25_free_sock(struct sock *sk)
{
ax25_cb_put(sk_to_ax25(sk));
}
/*
* Socket removal during an interrupt is now safe.
*/
static void ax25_cb_del(ax25_cb *ax25)
{
spin_lock_bh(&ax25_list_lock);
if (!hlist_unhashed(&ax25->ax25_node)) {
hlist_del_init(&ax25->ax25_node);
ax25_cb_put(ax25);
}
spin_unlock_bh(&ax25_list_lock);
}
/*
* Kill all bound sockets on a dropped device.
*/
static void ax25_kill_by_device(struct net_device *dev)
{
ax25_dev *ax25_dev;
ax25_cb *s;
struct sock *sk;
if ((ax25_dev = ax25_dev_ax25dev(dev)) == NULL)
return;
ax25_dev->device_up = false;
spin_lock_bh(&ax25_list_lock);
again:
ax25_for_each(s, &ax25_list) {
if (s->ax25_dev == ax25_dev) {
sk = s->sk;
if (!sk) {
spin_unlock_bh(&ax25_list_lock);
ax25_disconnect(s, ENETUNREACH);
s->ax25_dev = NULL;
ax25_cb_del(s);
spin_lock_bh(&ax25_list_lock);
goto again;
}
sock_hold(sk);
spin_unlock_bh(&ax25_list_lock);
lock_sock(sk);
ax25_disconnect(s, ENETUNREACH);
s->ax25_dev = NULL;
if (sk->sk_socket) {
netdev_put(ax25_dev->dev,
&ax25_dev->dev_tracker);
ax25_dev_put(ax25_dev);
}
ax25_cb_del(s);
release_sock(sk);
spin_lock_bh(&ax25_list_lock);
sock_put(sk);
/* The entry could have been deleted from the
* list meanwhile and thus the next pointer is
* no longer valid. Play it safe and restart
* the scan. Forward progress is ensured
* because we set s->ax25_dev to NULL and we
* are never passed a NULL 'dev' argument.
*/
goto again;
}
}
spin_unlock_bh(&ax25_list_lock);
}
/*
* Handle device status changes.
*/
static int ax25_device_event(struct notifier_block *this, unsigned long event,
void *ptr)
{
struct net_device *dev = netdev_notifier_info_to_dev(ptr);
if (!net_eq(dev_net(dev), &init_net))
return NOTIFY_DONE;
/* Reject non AX.25 devices */
if (dev->type != ARPHRD_AX25)
return NOTIFY_DONE;
switch (event) {
case NETDEV_UP:
ax25_dev_device_up(dev);
break;
case NETDEV_DOWN:
ax25_kill_by_device(dev);
ax25_rt_device_down(dev);
ax25_dev_device_down(dev);
break;
default:
break;
}
return NOTIFY_DONE;
}
/*
* Add a socket to the bound sockets list.
*/
void ax25_cb_add(ax25_cb *ax25)
{
spin_lock_bh(&ax25_list_lock);
ax25_cb_hold(ax25);
hlist_add_head(&ax25->ax25_node, &ax25_list);
spin_unlock_bh(&ax25_list_lock);
}
/*
* Find a socket that wants to accept the SABM we have just
* received.
*/
struct sock *ax25_find_listener(ax25_address *addr, int digi,
struct net_device *dev, int type)
{
ax25_cb *s;
spin_lock(&ax25_list_lock);
ax25_for_each(s, &ax25_list) {
if ((s->iamdigi && !digi) || (!s->iamdigi && digi))
continue;
if (s->sk && !ax25cmp(&s->source_addr, addr) &&
s->sk->sk_type == type && s->sk->sk_state == TCP_LISTEN) {
/* If device is null we match any device */
if (s->ax25_dev == NULL || s->ax25_dev->dev == dev) {
sock_hold(s->sk);
spin_unlock(&ax25_list_lock);
return s->sk;
}
}
}
spin_unlock(&ax25_list_lock);
return NULL;
}
/*
* Find an AX.25 socket given both ends.
*/
struct sock *ax25_get_socket(ax25_address *my_addr, ax25_address *dest_addr,
int type)
{
struct sock *sk = NULL;
ax25_cb *s;
spin_lock(&ax25_list_lock);
ax25_for_each(s, &ax25_list) {
if (s->sk && !ax25cmp(&s->source_addr, my_addr) &&
!ax25cmp(&s->dest_addr, dest_addr) &&
s->sk->sk_type == type) {
sk = s->sk;
sock_hold(sk);
break;
}
}
spin_unlock(&ax25_list_lock);
return sk;
}
/*
* Find an AX.25 control block given both ends. It will only pick up
* floating AX.25 control blocks or non Raw socket bound control blocks.
*/
ax25_cb *ax25_find_cb(const ax25_address *src_addr, ax25_address *dest_addr,
ax25_digi *digi, struct net_device *dev)
{
ax25_cb *s;
spin_lock_bh(&ax25_list_lock);
ax25_for_each(s, &ax25_list) {
if (s->sk && s->sk->sk_type != SOCK_SEQPACKET)
continue;
if (s->ax25_dev == NULL)
continue;
if (ax25cmp(&s->source_addr, src_addr) == 0 && ax25cmp(&s->dest_addr, dest_addr) == 0 && s->ax25_dev->dev == dev) {
if (digi != NULL && digi->ndigi != 0) {
if (s->digipeat == NULL)
continue;
if (ax25digicmp(s->digipeat, digi) != 0)
continue;
} else {
if (s->digipeat != NULL && s->digipeat->ndigi != 0)
continue;
}
ax25_cb_hold(s);
spin_unlock_bh(&ax25_list_lock);
return s;
}
}
spin_unlock_bh(&ax25_list_lock);
return NULL;
}
EXPORT_SYMBOL(ax25_find_cb);
void ax25_send_to_raw(ax25_address *addr, struct sk_buff *skb, int proto)
{
ax25_cb *s;
struct sk_buff *copy;
spin_lock(&ax25_list_lock);
ax25_for_each(s, &ax25_list) {
if (s->sk != NULL && ax25cmp(&s->source_addr, addr) == 0 &&
s->sk->sk_type == SOCK_RAW &&
s->sk->sk_protocol == proto &&
s->ax25_dev->dev == skb->dev &&
atomic_read(&s->sk->sk_rmem_alloc) <= s->sk->sk_rcvbuf) {
if ((copy = skb_clone(skb, GFP_ATOMIC)) == NULL)
continue;
if (sock_queue_rcv_skb(s->sk, copy) != 0)
kfree_skb(copy);
}
}
spin_unlock(&ax25_list_lock);
}
/*
* Deferred destroy.
*/
void ax25_destroy_socket(ax25_cb *);
/*
* Handler for deferred kills.
*/
static void ax25_destroy_timer(struct timer_list *t)
{
ax25_cb *ax25 = from_timer(ax25, t, dtimer);
struct sock *sk;
sk = ax25->sk;
bh_lock_sock(sk);
sock_hold(sk);
ax25_destroy_socket(ax25);
bh_unlock_sock(sk);
sock_put(sk);
}
/*
* This is called from user mode and the timers. Thus it protects itself
* against interrupt users but doesn't worry about being called during
* work. Once it is removed from the queue no interrupt or bottom half
* will touch it and we are (fairly 8-) ) safe.
*/
void ax25_destroy_socket(ax25_cb *ax25)
{
struct sk_buff *skb;
ax25_cb_del(ax25);
ax25_stop_heartbeat(ax25);
ax25_stop_t1timer(ax25);
ax25_stop_t2timer(ax25);
ax25_stop_t3timer(ax25);
ax25_stop_idletimer(ax25);
ax25_clear_queues(ax25); /* Flush the queues */
if (ax25->sk != NULL) {
while ((skb = skb_dequeue(&ax25->sk->sk_receive_queue)) != NULL) {
if (skb->sk != ax25->sk) {
/* A pending connection */
ax25_cb *sax25 = sk_to_ax25(skb->sk);
/* Queue the unaccepted socket for death */
sock_orphan(skb->sk);
/* 9A4GL: hack to release unaccepted sockets */
skb->sk->sk_state = TCP_LISTEN;
ax25_start_heartbeat(sax25);
sax25->state = AX25_STATE_0;
}
kfree_skb(skb);
}
skb_queue_purge(&ax25->sk->sk_write_queue);
}
if (ax25->sk != NULL) {
if (sk_has_allocations(ax25->sk)) {
/* Defer: outstanding buffers */
timer_setup(&ax25->dtimer, ax25_destroy_timer, 0);
ax25->dtimer.expires = jiffies + 2 * HZ;
add_timer(&ax25->dtimer);
} else {
struct sock *sk = ax25->sk;
ax25->sk = NULL;
sock_put(sk);
}
} else {
ax25_cb_put(ax25);
}
}
/*
* dl1bke 960311: set parameters for existing AX.25 connections,
* includes a KILL command to abort any connection.
* VERY useful for debugging ;-)
*/
static int ax25_ctl_ioctl(const unsigned int cmd, void __user *arg)
{
struct ax25_ctl_struct ax25_ctl;
ax25_digi digi;
ax25_dev *ax25_dev;
ax25_cb *ax25;
unsigned int k;
int ret = 0;
if (copy_from_user(&ax25_ctl, arg, sizeof(ax25_ctl)))
return -EFAULT;
if (ax25_ctl.digi_count > AX25_MAX_DIGIS)
return -EINVAL;
if (ax25_ctl.arg > ULONG_MAX / HZ && ax25_ctl.cmd != AX25_KILL)
return -EINVAL;
ax25_dev = ax25_addr_ax25dev(&ax25_ctl.port_addr);
if (!ax25_dev)
return -ENODEV;
digi.ndigi = ax25_ctl.digi_count;
for (k = 0; k < digi.ndigi; k++)
digi.calls[k] = ax25_ctl.digi_addr[k];
ax25 = ax25_find_cb(&ax25_ctl.source_addr, &ax25_ctl.dest_addr, &digi, ax25_dev->dev);
if (!ax25) {
ax25_dev_put(ax25_dev);
return -ENOTCONN;
}
switch (ax25_ctl.cmd) {
case AX25_KILL:
ax25_send_control(ax25, AX25_DISC, AX25_POLLON, AX25_COMMAND);
#ifdef CONFIG_AX25_DAMA_SLAVE
if (ax25_dev->dama.slave && ax25->ax25_dev->values[AX25_VALUES_PROTOCOL] == AX25_PROTO_DAMA_SLAVE)
ax25_dama_off(ax25);
#endif
ax25_disconnect(ax25, ENETRESET);
break;
case AX25_WINDOW:
if (ax25->modulus == AX25_MODULUS) {
if (ax25_ctl.arg < 1 || ax25_ctl.arg > 7)
goto einval_put;
} else {
if (ax25_ctl.arg < 1 || ax25_ctl.arg > 63)
goto einval_put;
}
ax25->window = ax25_ctl.arg;
break;
case AX25_T1:
if (ax25_ctl.arg < 1 || ax25_ctl.arg > ULONG_MAX / HZ)
goto einval_put;
ax25->rtt = (ax25_ctl.arg * HZ) / 2;
ax25->t1 = ax25_ctl.arg * HZ;
break;
case AX25_T2:
if (ax25_ctl.arg < 1 || ax25_ctl.arg > ULONG_MAX / HZ)
goto einval_put;
ax25->t2 = ax25_ctl.arg * HZ;
break;
case AX25_N2:
if (ax25_ctl.arg < 1 || ax25_ctl.arg > 31)
goto einval_put;
ax25->n2count = 0;
ax25->n2 = ax25_ctl.arg;
break;
case AX25_T3:
if (ax25_ctl.arg > ULONG_MAX / HZ)
goto einval_put;
ax25->t3 = ax25_ctl.arg * HZ;
break;
case AX25_IDLE:
if (ax25_ctl.arg > ULONG_MAX / (60 * HZ))
goto einval_put;
ax25->idle = ax25_ctl.arg * 60 * HZ;
break;
case AX25_PACLEN:
if (ax25_ctl.arg < 16 || ax25_ctl.arg > 65535)
goto einval_put;
ax25->paclen = ax25_ctl.arg;
break;
default:
goto einval_put;
}
out_put:
ax25_dev_put(ax25_dev);
ax25_cb_put(ax25);
return ret;
einval_put:
ret = -EINVAL;
goto out_put;
}
static void ax25_fillin_cb_from_dev(ax25_cb *ax25, ax25_dev *ax25_dev)
{
ax25->rtt = msecs_to_jiffies(ax25_dev->values[AX25_VALUES_T1]) / 2;
ax25->t1 = msecs_to_jiffies(ax25_dev->values[AX25_VALUES_T1]);
ax25->t2 = msecs_to_jiffies(ax25_dev->values[AX25_VALUES_T2]);
ax25->t3 = msecs_to_jiffies(ax25_dev->values[AX25_VALUES_T3]);
ax25->n2 = ax25_dev->values[AX25_VALUES_N2];
ax25->paclen = ax25_dev->values[AX25_VALUES_PACLEN];
ax25->idle = msecs_to_jiffies(ax25_dev->values[AX25_VALUES_IDLE]);
ax25->backoff = ax25_dev->values[AX25_VALUES_BACKOFF];
if (ax25_dev->values[AX25_VALUES_AXDEFMODE]) {
ax25->modulus = AX25_EMODULUS;
ax25->window = ax25_dev->values[AX25_VALUES_EWINDOW];
} else {
ax25->modulus = AX25_MODULUS;
ax25->window = ax25_dev->values[AX25_VALUES_WINDOW];
}
}
/*
 * Fill in a newly created AX.25 control block with the default
 * values for a particular device.
*/
void ax25_fillin_cb(ax25_cb *ax25, ax25_dev *ax25_dev)
{
ax25->ax25_dev = ax25_dev;
if (ax25->ax25_dev != NULL) {
ax25_fillin_cb_from_dev(ax25, ax25_dev);
return;
}
/*
* No device, use kernel / AX.25 spec default values
*/
ax25->rtt = msecs_to_jiffies(AX25_DEF_T1) / 2;
ax25->t1 = msecs_to_jiffies(AX25_DEF_T1);
ax25->t2 = msecs_to_jiffies(AX25_DEF_T2);
ax25->t3 = msecs_to_jiffies(AX25_DEF_T3);
ax25->n2 = AX25_DEF_N2;
ax25->paclen = AX25_DEF_PACLEN;
ax25->idle = msecs_to_jiffies(AX25_DEF_IDLE);
ax25->backoff = AX25_DEF_BACKOFF;
if (AX25_DEF_AXDEFMODE) {
ax25->modulus = AX25_EMODULUS;
ax25->window = AX25_DEF_EWINDOW;
} else {
ax25->modulus = AX25_MODULUS;
ax25->window = AX25_DEF_WINDOW;
}
}
/*
* Create an empty AX.25 control block.
*/
ax25_cb *ax25_create_cb(void)
{
ax25_cb *ax25;
if ((ax25 = kzalloc(sizeof(*ax25), GFP_ATOMIC)) == NULL)
return NULL;
refcount_set(&ax25->refcount, 1);
skb_queue_head_init(&ax25->write_queue);
skb_queue_head_init(&ax25->frag_queue);
skb_queue_head_init(&ax25->ack_queue);
skb_queue_head_init(&ax25->reseq_queue);
ax25_setup_timers(ax25);
ax25_fillin_cb(ax25, NULL);
ax25->state = AX25_STATE_0;
return ax25;
}
/*
* Handling for system calls applied via the various interfaces to an
* AX25 socket object
*/
static int ax25_setsockopt(struct socket *sock, int level, int optname,
sockptr_t optval, unsigned int optlen)
{
struct sock *sk = sock->sk;
ax25_cb *ax25;
struct net_device *dev;
char devname[IFNAMSIZ];
unsigned int opt;
int res = 0;
if (level != SOL_AX25)
return -ENOPROTOOPT;
if (optlen < sizeof(unsigned int))
return -EINVAL;
if (copy_from_sockptr(&opt, optval, sizeof(unsigned int)))
return -EFAULT;
lock_sock(sk);
ax25 = sk_to_ax25(sk);
switch (optname) {
case AX25_WINDOW:
if (ax25->modulus == AX25_MODULUS) {
if (opt < 1 || opt > 7) {
res = -EINVAL;
break;
}
} else {
if (opt < 1 || opt > 63) {
res = -EINVAL;
break;
}
}
ax25->window = opt;
break;
case AX25_T1:
if (opt < 1 || opt > UINT_MAX / HZ) {
res = -EINVAL;
break;
}
ax25->rtt = (opt * HZ) >> 1;
ax25->t1 = opt * HZ;
break;
case AX25_T2:
if (opt < 1 || opt > UINT_MAX / HZ) {
res = -EINVAL;
break;
}
ax25->t2 = opt * HZ;
break;
case AX25_N2:
if (opt < 1 || opt > 31) {
res = -EINVAL;
break;
}
ax25->n2 = opt;
break;
case AX25_T3:
if (opt < 1 || opt > UINT_MAX / HZ) {
res = -EINVAL;
break;
}
ax25->t3 = opt * HZ;
break;
case AX25_IDLE:
if (opt > UINT_MAX / (60 * HZ)) {
res = -EINVAL;
break;
}
ax25->idle = opt * 60 * HZ;
break;
case AX25_BACKOFF:
if (opt > 2) {
res = -EINVAL;
break;
}
ax25->backoff = opt;
break;
case AX25_EXTSEQ:
ax25->modulus = opt ? AX25_EMODULUS : AX25_MODULUS;
break;
case AX25_PIDINCL:
ax25->pidincl = opt ? 1 : 0;
break;
case AX25_IAMDIGI:
ax25->iamdigi = opt ? 1 : 0;
break;
case AX25_PACLEN:
if (opt < 16 || opt > 65535) {
res = -EINVAL;
break;
}
ax25->paclen = opt;
break;
case SO_BINDTODEVICE:
if (optlen > IFNAMSIZ - 1)
optlen = IFNAMSIZ - 1;
memset(devname, 0, sizeof(devname));
if (copy_from_sockptr(devname, optval, optlen)) {
res = -EFAULT;
break;
}
if (sk->sk_type == SOCK_SEQPACKET &&
(sock->state != SS_UNCONNECTED ||
sk->sk_state == TCP_LISTEN)) {
res = -EADDRNOTAVAIL;
break;
}
rtnl_lock();
dev = __dev_get_by_name(&init_net, devname);
if (!dev) {
rtnl_unlock();
res = -ENODEV;
break;
}
ax25->ax25_dev = ax25_dev_ax25dev(dev);
if (!ax25->ax25_dev) {
rtnl_unlock();
res = -ENODEV;
break;
}
ax25_fillin_cb(ax25, ax25->ax25_dev);
rtnl_unlock();
break;
default:
res = -ENOPROTOOPT;
}
release_sock(sk);
return res;
}
static int ax25_getsockopt(struct socket *sock, int level, int optname,
char __user *optval, int __user *optlen)
{
struct sock *sk = sock->sk;
ax25_cb *ax25;
struct ax25_dev *ax25_dev;
char devname[IFNAMSIZ];
void *valptr;
int val = 0;
int maxlen, length;
if (level != SOL_AX25)
return -ENOPROTOOPT;
if (get_user(maxlen, optlen))
return -EFAULT;
if (maxlen < 1)
return -EFAULT;
valptr = &val;
length = min_t(unsigned int, maxlen, sizeof(int));
lock_sock(sk);
ax25 = sk_to_ax25(sk);
switch (optname) {
case AX25_WINDOW:
val = ax25->window;
break;
case AX25_T1:
val = ax25->t1 / HZ;
break;
case AX25_T2:
val = ax25->t2 / HZ;
break;
case AX25_N2:
val = ax25->n2;
break;
case AX25_T3:
val = ax25->t3 / HZ;
break;
case AX25_IDLE:
val = ax25->idle / (60 * HZ);
break;
case AX25_BACKOFF:
val = ax25->backoff;
break;
case AX25_EXTSEQ:
val = (ax25->modulus == AX25_EMODULUS);
break;
case AX25_PIDINCL:
val = ax25->pidincl;
break;
case AX25_IAMDIGI:
val = ax25->iamdigi;
break;
case AX25_PACLEN:
val = ax25->paclen;
break;
case SO_BINDTODEVICE:
ax25_dev = ax25->ax25_dev;
if (ax25_dev != NULL && ax25_dev->dev != NULL) {
strscpy(devname, ax25_dev->dev->name, sizeof(devname));
length = strlen(devname) + 1;
} else {
*devname = '\0';
length = 1;
}
valptr = devname;
break;
default:
release_sock(sk);
return -ENOPROTOOPT;
}
release_sock(sk);
if (put_user(length, optlen))
return -EFAULT;
return copy_to_user(optval, valptr, length) ? -EFAULT : 0;
}
static int ax25_listen(struct socket *sock, int backlog)
{
struct sock *sk = sock->sk;
int res = 0;
lock_sock(sk);
if (sk->sk_type == SOCK_SEQPACKET && sk->sk_state != TCP_LISTEN) {
sk->sk_max_ack_backlog = backlog;
sk->sk_state = TCP_LISTEN;
goto out;
}
res = -EOPNOTSUPP;
out:
release_sock(sk);
return res;
}
/*
* XXX: when creating ax25_sock we should update the .obj_size setting
* below.
*/
static struct proto ax25_proto = {
.name = "AX25",
.owner = THIS_MODULE,
.obj_size = sizeof(struct ax25_sock),
};
static int ax25_create(struct net *net, struct socket *sock, int protocol,
int kern)
{
struct sock *sk;
ax25_cb *ax25;
if (protocol < 0 || protocol > U8_MAX)
return -EINVAL;
if (!net_eq(net, &init_net))
return -EAFNOSUPPORT;
switch (sock->type) {
case SOCK_DGRAM:
if (protocol == 0 || protocol == PF_AX25)
protocol = AX25_P_TEXT;
break;
case SOCK_SEQPACKET:
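/*
* Refuse PIDs that are (or may be) handled by in-kernel protocols.
* For modular NET/ROM and ROSE, refuse the PID only if the module
* has actually registered it.
*/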
switch (protocol) {
case 0:
case PF_AX25: /* For CLX */
protocol = AX25_P_TEXT;
break;
case AX25_P_SEGMENT:
#ifdef CONFIG_INET
case AX25_P_ARP:
case AX25_P_IP:
#endif
#ifdef CONFIG_NETROM
case AX25_P_NETROM:
#endif
#ifdef CONFIG_ROSE
case AX25_P_ROSE:
#endif
return -ESOCKTNOSUPPORT;
#ifdef CONFIG_NETROM_MODULE
case AX25_P_NETROM:
if (ax25_protocol_is_registered(AX25_P_NETROM))
return -ESOCKTNOSUPPORT;
break;
#endif
#ifdef CONFIG_ROSE_MODULE
case AX25_P_ROSE:
if (ax25_protocol_is_registered(AX25_P_ROSE))
return -ESOCKTNOSUPPORT;
break;
#endif
default:
break;
}
break;
case SOCK_RAW:
if (!capable(CAP_NET_RAW))
return -EPERM;
break;
default:
return -ESOCKTNOSUPPORT;
}
sk = sk_alloc(net, PF_AX25, GFP_ATOMIC, &ax25_proto, kern);
if (sk == NULL)
return -ENOMEM;
ax25 = ax25_sk(sk)->cb = ax25_create_cb();
if (!ax25) {
sk_free(sk);
return -ENOMEM;
}
sock_init_data(sock, sk);
sk->sk_destruct = ax25_free_sock;
sock->ops = &ax25_proto_ops;
sk->sk_protocol = protocol;
ax25->sk = sk;
return 0;
}
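/*
* Clone a new socket from a listening one for an incoming connection.
* The type, buffer sizes and all AX.25 parameters are inherited from
* the original socket.
*/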
struct sock *ax25_make_new(struct sock *osk, struct ax25_dev *ax25_dev)
{
struct sock *sk;
ax25_cb *ax25, *oax25;
sk = sk_alloc(sock_net(osk), PF_AX25, GFP_ATOMIC, osk->sk_prot, 0);
if (sk == NULL)
return NULL;
if ((ax25 = ax25_create_cb()) == NULL) {
sk_free(sk);
return NULL;
}
switch (osk->sk_type) {
case SOCK_DGRAM:
break;
case SOCK_SEQPACKET:
break;
default:
sk_free(sk);
ax25_cb_put(ax25);
return NULL;
}
sock_init_data(NULL, sk);
sk->sk_type = osk->sk_type;
sk->sk_priority = osk->sk_priority;
sk->sk_protocol = osk->sk_protocol;
sk->sk_rcvbuf = osk->sk_rcvbuf;
sk->sk_sndbuf = osk->sk_sndbuf;
sk->sk_state = TCP_ESTABLISHED;
sock_copy_flags(sk, osk);
oax25 = sk_to_ax25(osk);
ax25->modulus = oax25->modulus;
ax25->backoff = oax25->backoff;
ax25->pidincl = oax25->pidincl;
ax25->iamdigi = oax25->iamdigi;
ax25->rtt = oax25->rtt;
ax25->t1 = oax25->t1;
ax25->t2 = oax25->t2;
ax25->t3 = oax25->t3;
ax25->n2 = oax25->n2;
ax25->idle = oax25->idle;
ax25->paclen = oax25->paclen;
ax25->window = oax25->window;
ax25->ax25_dev = ax25_dev;
ax25->source_addr = oax25->source_addr;
if (oax25->digipeat != NULL) {
ax25->digipeat = kmemdup(oax25->digipeat, sizeof(ax25_digi),
GFP_ATOMIC);
if (ax25->digipeat == NULL) {
sk_free(sk);
ax25_cb_put(ax25);
return NULL;
}
}
ax25_sk(sk)->cb = ax25;
sk->sk_destruct = ax25_free_sock;
ax25->sk = sk;
return sk;
}
static int ax25_release(struct socket *sock)
{
struct sock *sk = sock->sk;
ax25_cb *ax25;
ax25_dev *ax25_dev;
if (sk == NULL)
return 0;
sock_hold(sk);
lock_sock(sk);
sock_orphan(sk);
ax25 = sk_to_ax25(sk);
ax25_dev = ax25->ax25_dev;
if (sk->sk_type == SOCK_SEQPACKET) {
switch (ax25->state) {
case AX25_STATE_0:
if (!sock_flag(ax25->sk, SOCK_DEAD)) {
release_sock(sk);
ax25_disconnect(ax25, 0);
lock_sock(sk);
}
ax25_destroy_socket(ax25);
break;
case AX25_STATE_1:
case AX25_STATE_2:
ax25_send_control(ax25, AX25_DISC, AX25_POLLON, AX25_COMMAND);
release_sock(sk);
ax25_disconnect(ax25, 0);
lock_sock(sk);
if (!sock_flag(ax25->sk, SOCK_DESTROY))
ax25_destroy_socket(ax25);
break;
case AX25_STATE_3:
case AX25_STATE_4:
ax25_clear_queues(ax25);
ax25->n2count = 0;
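/*
* Initiate a graceful link release: send DISC (or just stop the
* timers in DAMA slave mode), move to state 2 and mark the socket
* SOCK_DESTROY so it is freed once the link has come down.
*/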
switch (ax25->ax25_dev->values[AX25_VALUES_PROTOCOL]) {
case AX25_PROTO_STD_SIMPLEX:
case AX25_PROTO_STD_DUPLEX:
ax25_send_control(ax25,
AX25_DISC,
AX25_POLLON,
AX25_COMMAND);
ax25_stop_t2timer(ax25);
ax25_stop_t3timer(ax25);
ax25_stop_idletimer(ax25);
break;
#ifdef CONFIG_AX25_DAMA_SLAVE
case AX25_PROTO_DAMA_SLAVE:
ax25_stop_t3timer(ax25);
ax25_stop_idletimer(ax25);
break;
#endif
}
ax25_calculate_t1(ax25);
ax25_start_t1timer(ax25);
ax25->state = AX25_STATE_2;
sk->sk_state = TCP_CLOSE;
sk->sk_shutdown |= SEND_SHUTDOWN;
sk->sk_state_change(sk);
sock_set_flag(sk, SOCK_DESTROY);
break;
default:
break;
}
} else {
sk->sk_state = TCP_CLOSE;
sk->sk_shutdown |= SEND_SHUTDOWN;
sk->sk_state_change(sk);
ax25_destroy_socket(ax25);
}
if (ax25_dev) {
if (!ax25_dev->device_up) {
del_timer_sync(&ax25->timer);
del_timer_sync(&ax25->t1timer);
del_timer_sync(&ax25->t2timer);
del_timer_sync(&ax25->t3timer);
del_timer_sync(&ax25->idletimer);
}
netdev_put(ax25_dev->dev, &ax25->dev_tracker);
ax25_dev_put(ax25_dev);
}
sock->sk = NULL;
release_sock(sk);
sock_put(sk);
return 0;
}
/*
* We support a funny extension here so you can (as root) give any callsign
* digipeated via a local address as source. This hack is obsolete now
* that we've implemented support for SO_BINDTODEVICE. It is however small
* and trivially backward compatible.
*/
static int ax25_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
{
struct sock *sk = sock->sk;
struct full_sockaddr_ax25 *addr = (struct full_sockaddr_ax25 *)uaddr;
ax25_dev *ax25_dev = NULL;
ax25_uid_assoc *user;
ax25_address call;
ax25_cb *ax25;
int err = 0;
if (addr_len != sizeof(struct sockaddr_ax25) &&
addr_len != sizeof(struct full_sockaddr_ax25))
/* support for old structure may go away some time
* ax25_bind(): uses old (6 digipeater) socket structure.
*/
if ((addr_len < sizeof(struct sockaddr_ax25) + sizeof(ax25_address) * 6) ||
(addr_len > sizeof(struct full_sockaddr_ax25)))
return -EINVAL;
if (addr->fsa_ax25.sax25_family != AF_AX25)
return -EINVAL;
user = ax25_findbyuid(current_euid());
if (user) {
call = user->call;
ax25_uid_put(user);
} else {
if (ax25_uid_policy && !capable(CAP_NET_ADMIN))
return -EACCES;
call = addr->fsa_ax25.sax25_call;
}
lock_sock(sk);
ax25 = sk_to_ax25(sk);
if (!sock_flag(sk, SOCK_ZAPPED)) {
err = -EINVAL;
goto out;
}
ax25->source_addr = call;
/*
* User already set interface with SO_BINDTODEVICE
*/
if (ax25->ax25_dev != NULL)
goto done;
if (addr_len > sizeof(struct sockaddr_ax25) && addr->fsa_ax25.sax25_ndigis == 1) {
if (ax25cmp(&addr->fsa_digipeater[0], &null_ax25_address) != 0 &&
(ax25_dev = ax25_addr_ax25dev(&addr->fsa_digipeater[0])) == NULL) {
err = -EADDRNOTAVAIL;
goto out;
}
} else {
if ((ax25_dev = ax25_addr_ax25dev(&addr->fsa_ax25.sax25_call)) == NULL) {
err = -EADDRNOTAVAIL;
goto out;
}
}
if (ax25_dev) {
ax25_fillin_cb(ax25, ax25_dev);
netdev_hold(ax25_dev->dev, &ax25->dev_tracker, GFP_ATOMIC);
}
done:
ax25_cb_add(ax25);
sock_reset_flag(sk, SOCK_ZAPPED);
out:
release_sock(sk);
return err;
}
/*
* FIXME: nonblock behaviour looks like it may have a bug.
*/
static int __must_check ax25_connect(struct socket *sock,
struct sockaddr *uaddr, int addr_len, int flags)
{
struct sock *sk = sock->sk;
ax25_cb *ax25 = sk_to_ax25(sk), *ax25t;
struct full_sockaddr_ax25 *fsa = (struct full_sockaddr_ax25 *)uaddr;
ax25_digi *digi = NULL;
int ct = 0, err = 0;
/*
* some sanity checks. code further down depends on this
*/
if (addr_len == sizeof(struct sockaddr_ax25))
/* support for this will go away in early 2.5.x
* ax25_connect(): uses obsolete socket structure
*/
;
else if (addr_len != sizeof(struct full_sockaddr_ax25))
/* support for old structure may go away some time
* ax25_connect(): uses old (6 digipeater) socket structure.
*/
if ((addr_len < sizeof(struct sockaddr_ax25) + sizeof(ax25_address) * 6) ||
(addr_len > sizeof(struct full_sockaddr_ax25)))
return -EINVAL;
if (fsa->fsa_ax25.sax25_family != AF_AX25)
return -EINVAL;
lock_sock(sk);
/* deal with restarts */
if (sock->state == SS_CONNECTING) {
switch (sk->sk_state) {
case TCP_SYN_SENT: /* still trying */
err = -EINPROGRESS;
goto out_release;
case TCP_ESTABLISHED: /* connection established */
sock->state = SS_CONNECTED;
goto out_release;
case TCP_CLOSE: /* connection refused */
sock->state = SS_UNCONNECTED;
err = -ECONNREFUSED;
goto out_release;
}
}
if (sk->sk_state == TCP_ESTABLISHED && sk->sk_type == SOCK_SEQPACKET) {
err = -EISCONN; /* No reconnect on a seqpacket socket */
goto out_release;
}
sk->sk_state = TCP_CLOSE;
sock->state = SS_UNCONNECTED;
kfree(ax25->digipeat);
ax25->digipeat = NULL;
/*
* Handle digi-peaters to be used.
*/
if (addr_len > sizeof(struct sockaddr_ax25) &&
fsa->fsa_ax25.sax25_ndigis != 0) {
/* Valid number of digipeaters ? */
if (fsa->fsa_ax25.sax25_ndigis < 1 ||
fsa->fsa_ax25.sax25_ndigis > AX25_MAX_DIGIS ||
addr_len < sizeof(struct sockaddr_ax25) +
sizeof(ax25_address) * fsa->fsa_ax25.sax25_ndigis) {
err = -EINVAL;
goto out_release;
}
if ((digi = kmalloc(sizeof(ax25_digi), GFP_KERNEL)) == NULL) {
err = -ENOBUFS;
goto out_release;
}
digi->ndigi = fsa->fsa_ax25.sax25_ndigis;
digi->lastrepeat = -1;
while (ct < fsa->fsa_ax25.sax25_ndigis) {
if ((fsa->fsa_digipeater[ct].ax25_call[6] &
AX25_HBIT) && ax25->iamdigi) {
digi->repeated[ct] = 1;
digi->lastrepeat = ct;
} else {
digi->repeated[ct] = 0;
}
digi->calls[ct] = fsa->fsa_digipeater[ct];
ct++;
}
}
/*
* Must bind first - autobinding in this may or may not work. If
* the socket is already bound, check to see if the device has
* been filled in, error if it hasn't.
*/
if (sock_flag(sk, SOCK_ZAPPED)) {
/* check if we can remove this feature. It is broken. */
printk(KERN_WARNING "ax25_connect(): %s uses autobind, please contact [email protected]\n",
current->comm);
if ((err = ax25_rt_autobind(ax25, &fsa->fsa_ax25.sax25_call)) < 0) {
kfree(digi);
goto out_release;
}
ax25_fillin_cb(ax25, ax25->ax25_dev);
ax25_cb_add(ax25);
} else {
if (ax25->ax25_dev == NULL) {
kfree(digi);
err = -EHOSTUNREACH;
goto out_release;
}
}
if (sk->sk_type == SOCK_SEQPACKET &&
(ax25t = ax25_find_cb(&ax25->source_addr, &fsa->fsa_ax25.sax25_call, digi,
ax25->ax25_dev->dev))) {
kfree(digi);
err = -EADDRINUSE; /* Already such a connection */
ax25_cb_put(ax25t);
goto out_release;
}
ax25->dest_addr = fsa->fsa_ax25.sax25_call;
ax25->digipeat = digi;
/* First the easy one */
if (sk->sk_type != SOCK_SEQPACKET) {
sock->state = SS_CONNECTED;
sk->sk_state = TCP_ESTABLISHED;
goto out_release;
}
/* Move to connecting socket, ax.25 lapb WAIT_UA.. */
sock->state = SS_CONNECTING;
sk->sk_state = TCP_SYN_SENT;
switch (ax25->ax25_dev->values[AX25_VALUES_PROTOCOL]) {
case AX25_PROTO_STD_SIMPLEX:
case AX25_PROTO_STD_DUPLEX:
ax25_std_establish_data_link(ax25);
break;
#ifdef CONFIG_AX25_DAMA_SLAVE
case AX25_PROTO_DAMA_SLAVE:
ax25->modulus = AX25_MODULUS;
ax25->window = ax25->ax25_dev->values[AX25_VALUES_WINDOW];
if (ax25->ax25_dev->dama.slave)
ax25_ds_establish_data_link(ax25);
else
ax25_std_establish_data_link(ax25);
break;
#endif
}
ax25->state = AX25_STATE_1;
ax25_start_heartbeat(ax25);
/* Now wait for the connection to complete, unless non-blocking */
if (sk->sk_state != TCP_ESTABLISHED && (flags & O_NONBLOCK)) {
err = -EINPROGRESS;
goto out_release;
}
if (sk->sk_state == TCP_SYN_SENT) {
DEFINE_WAIT(wait);
for (;;) {
prepare_to_wait(sk_sleep(sk), &wait,
TASK_INTERRUPTIBLE);
if (sk->sk_state != TCP_SYN_SENT)
break;
if (!signal_pending(current)) {
release_sock(sk);
schedule();
lock_sock(sk);
continue;
}
err = -ERESTARTSYS;
break;
}
finish_wait(sk_sleep(sk), &wait);
if (err)
goto out_release;
}
if (sk->sk_state != TCP_ESTABLISHED) {
/* Not in ABM, not in WAIT_UA -> failed */
sock->state = SS_UNCONNECTED;
err = sock_error(sk); /* Always set at this point */
goto out_release;
}
sock->state = SS_CONNECTED;
err = 0;
out_release:
release_sock(sk);
return err;
}
static int ax25_accept(struct socket *sock, struct socket *newsock, int flags,
bool kern)
{
struct sk_buff *skb;
struct sock *newsk;
DEFINE_WAIT(wait);
struct sock *sk;
int err = 0;
if (sock->state != SS_UNCONNECTED)
return -EINVAL;
if ((sk = sock->sk) == NULL)
return -EINVAL;
lock_sock(sk);
if (sk->sk_type != SOCK_SEQPACKET) {
err = -EOPNOTSUPP;
goto out;
}
if (sk->sk_state != TCP_LISTEN) {
err = -EINVAL;
goto out;
}
/*
* The receive queue at this point holds new sockets, ready to use,
* created from the SABMs we saved while listening
*/
for (;;) {
prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
skb = skb_dequeue(&sk->sk_receive_queue);
if (skb)
break;
if (flags & O_NONBLOCK) {
err = -EWOULDBLOCK;
break;
}
if (!signal_pending(current)) {
release_sock(sk);
schedule();
lock_sock(sk);
continue;
}
err = -ERESTARTSYS;
break;
}
finish_wait(sk_sleep(sk), &wait);
if (err)
goto out;
newsk = skb->sk;
sock_graft(newsk, newsock);
/* Now attach up the new socket */
kfree_skb(skb);
sk_acceptq_removed(sk);
newsock->state = SS_CONNECTED;
out:
release_sock(sk);
return err;
}
static int ax25_getname(struct socket *sock, struct sockaddr *uaddr,
int peer)
{
struct full_sockaddr_ax25 *fsa = (struct full_sockaddr_ax25 *)uaddr;
struct sock *sk = sock->sk;
unsigned char ndigi, i;
ax25_cb *ax25;
int err = 0;
memset(fsa, 0, sizeof(*fsa));
lock_sock(sk);
ax25 = sk_to_ax25(sk);
if (peer != 0) {
if (sk->sk_state != TCP_ESTABLISHED) {
err = -ENOTCONN;
goto out;
}
fsa->fsa_ax25.sax25_family = AF_AX25;
fsa->fsa_ax25.sax25_call = ax25->dest_addr;
if (ax25->digipeat != NULL) {
ndigi = ax25->digipeat->ndigi;
fsa->fsa_ax25.sax25_ndigis = ndigi;
for (i = 0; i < ndigi; i++)
fsa->fsa_digipeater[i] =
ax25->digipeat->calls[i];
}
} else {
fsa->fsa_ax25.sax25_family = AF_AX25;
fsa->fsa_ax25.sax25_call = ax25->source_addr;
fsa->fsa_ax25.sax25_ndigis = 1;
if (ax25->ax25_dev != NULL) {
memcpy(&fsa->fsa_digipeater[0],
ax25->ax25_dev->dev->dev_addr, AX25_ADDR_LEN);
} else {
fsa->fsa_digipeater[0] = null_ax25_address;
}
}
err = sizeof(struct full_sockaddr_ax25);
out:
release_sock(sk);
return err;
}
static int ax25_sendmsg(struct socket *sock, struct msghdr *msg, size_t len)
{
DECLARE_SOCKADDR(struct sockaddr_ax25 *, usax, msg->msg_name);
struct sock *sk = sock->sk;
struct sockaddr_ax25 sax;
struct sk_buff *skb;
ax25_digi dtmp, *dp;
ax25_cb *ax25;
size_t size;
int lv, err, addr_len = msg->msg_namelen;
if (msg->msg_flags & ~(MSG_DONTWAIT|MSG_EOR|MSG_CMSG_COMPAT))
return -EINVAL;
lock_sock(sk);
ax25 = sk_to_ax25(sk);
if (sock_flag(sk, SOCK_ZAPPED)) {
err = -EADDRNOTAVAIL;
goto out;
}
if (sk->sk_shutdown & SEND_SHUTDOWN) {
send_sig(SIGPIPE, current, 0);
err = -EPIPE;
goto out;
}
if (ax25->ax25_dev == NULL) {
err = -ENETUNREACH;
goto out;
}
if (len > ax25->ax25_dev->dev->mtu) {
err = -EMSGSIZE;
goto out;
}
if (usax != NULL) {
if (usax->sax25_family != AF_AX25) {
err = -EINVAL;
goto out;
}
if (addr_len == sizeof(struct sockaddr_ax25))
/* ax25_sendmsg(): uses obsolete socket structure */
;
else if (addr_len != sizeof(struct full_sockaddr_ax25))
/* support for old structure may go away some time
* ax25_sendmsg(): uses old (6 digipeater)
* socket structure.
*/
if ((addr_len < sizeof(struct sockaddr_ax25) + sizeof(ax25_address) * 6) ||
(addr_len > sizeof(struct full_sockaddr_ax25))) {
err = -EINVAL;
goto out;
}
if (addr_len > sizeof(struct sockaddr_ax25) && usax->sax25_ndigis != 0) {
int ct = 0;
struct full_sockaddr_ax25 *fsa = (struct full_sockaddr_ax25 *)usax;
/* Valid number of digipeaters ? */
if (usax->sax25_ndigis < 1 ||
usax->sax25_ndigis > AX25_MAX_DIGIS ||
addr_len < sizeof(struct sockaddr_ax25) +
sizeof(ax25_address) * usax->sax25_ndigis) {
err = -EINVAL;
goto out;
}
dtmp.ndigi = usax->sax25_ndigis;
while (ct < usax->sax25_ndigis) {
dtmp.repeated[ct] = 0;
dtmp.calls[ct] = fsa->fsa_digipeater[ct];
ct++;
}
dtmp.lastrepeat = 0;
}
sax = *usax;
if (sk->sk_type == SOCK_SEQPACKET &&
ax25cmp(&ax25->dest_addr, &sax.sax25_call)) {
err = -EISCONN;
goto out;
}
if (usax->sax25_ndigis == 0)
dp = NULL;
else
dp = &dtmp;
} else {
/*
* FIXME: 1003.1g - if the socket is like this because
* it has become closed (not started closed) and is VC
* we ought to SIGPIPE, EPIPE
*/
if (sk->sk_state != TCP_ESTABLISHED) {
err = -ENOTCONN;
goto out;
}
sax.sax25_family = AF_AX25;
sax.sax25_call = ax25->dest_addr;
dp = ax25->digipeat;
}
/* Build a packet */
/* Assume the worst case */
size = len + ax25->ax25_dev->dev->hard_header_len;
skb = sock_alloc_send_skb(sk, size, msg->msg_flags&MSG_DONTWAIT, &err);
if (skb == NULL)
goto out;
skb_reserve(skb, size - len);
/* User data follows immediately after the AX.25 data */
if (memcpy_from_msg(skb_put(skb, len), msg, len)) {
err = -EFAULT;
kfree_skb(skb);
goto out;
}
skb_reset_network_header(skb);
/* Add the PID if one is not supplied by the user in the skb */
if (!ax25->pidincl)
*(u8 *)skb_push(skb, 1) = sk->sk_protocol;
if (sk->sk_type == SOCK_SEQPACKET) {
/* Connected mode sockets go via the LAPB machine */
if (sk->sk_state != TCP_ESTABLISHED) {
kfree_skb(skb);
err = -ENOTCONN;
goto out;
}
/* Shove it onto the queue and kick */
ax25_output(ax25, ax25->paclen, skb);
err = len;
goto out;
}
skb_push(skb, 1 + ax25_addr_size(dp));
/* Build the AX.25 header */
lv = ax25_addr_build(skb->data, &ax25->source_addr, &sax.sax25_call,
dp, AX25_COMMAND, AX25_MODULUS);
skb_set_transport_header(skb, lv);
*skb_transport_header(skb) = AX25_UI;
/* Datagram frames go straight out of the door as UI */
ax25_queue_xmit(skb, ax25->ax25_dev->dev);
err = len;
out:
release_sock(sk);
return err;
}
static int ax25_recvmsg(struct socket *sock, struct msghdr *msg, size_t size,
int flags)
{
struct sock *sk = sock->sk;
struct sk_buff *skb, *last;
struct sk_buff_head *sk_queue;
int copied;
int err = 0;
int off = 0;
long timeo;
lock_sock(sk);
/*
* This works for seqpacket too. The receiver has ordered the
* queue for us! We do one quick check first though
*/
if (sk->sk_type == SOCK_SEQPACKET && sk->sk_state != TCP_ESTABLISHED) {
err = -ENOTCONN;
goto out;
}
/* We need support for non-blocking reads. */
sk_queue = &sk->sk_receive_queue;
skb = __skb_try_recv_datagram(sk, sk_queue, flags, &off, &err, &last);
/* If no packet is available, release_sock(sk) and try again. */
if (!skb) {
if (err != -EAGAIN)
goto out;
release_sock(sk);
timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
while (timeo && !__skb_wait_for_more_packets(sk, sk_queue, &err,
&timeo, last)) {
skb = __skb_try_recv_datagram(sk, sk_queue, flags, &off,
&err, &last);
if (skb)
break;
if (err != -EAGAIN)
goto done;
}
if (!skb)
goto done;
lock_sock(sk);
}
if (!sk_to_ax25(sk)->pidincl)
skb_pull(skb, 1); /* Remove PID */
skb_reset_transport_header(skb);
copied = skb->len;
if (copied > size) {
copied = size;
msg->msg_flags |= MSG_TRUNC;
}
skb_copy_datagram_msg(skb, 0, msg, copied);
if (msg->msg_name) {
ax25_digi digi;
ax25_address src;
const unsigned char *mac = skb_mac_header(skb);
DECLARE_SOCKADDR(struct sockaddr_ax25 *, sax, msg->msg_name);
memset(sax, 0, sizeof(struct full_sockaddr_ax25));
ax25_addr_parse(mac + 1, skb->data - mac - 1, &src, NULL,
&digi, NULL, NULL);
sax->sax25_family = AF_AX25;
/* We set this correctly, even though we may not let the
application know the digi calls further down (because it
did NOT ask to know them). This could get political... */
sax->sax25_ndigis = digi.ndigi;
sax->sax25_call = src;
if (sax->sax25_ndigis != 0) {
int ct;
struct full_sockaddr_ax25 *fsa = (struct full_sockaddr_ax25 *)sax;
for (ct = 0; ct < digi.ndigi; ct++)
fsa->fsa_digipeater[ct] = digi.calls[ct];
}
msg->msg_namelen = sizeof(struct full_sockaddr_ax25);
}
skb_free_datagram(sk, skb);
err = copied;
out:
release_sock(sk);
done:
return err;
}
static int ax25_shutdown(struct socket *sk, int how)
{
/* FIXME - generate DM and RNR states */
return -EOPNOTSUPP;
}
static int ax25_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
{
struct sock *sk = sock->sk;
void __user *argp = (void __user *)arg;
int res = 0;
lock_sock(sk);
switch (cmd) {
case TIOCOUTQ: {
long amount;
amount = sk->sk_sndbuf - sk_wmem_alloc_get(sk);
if (amount < 0)
amount = 0;
res = put_user(amount, (int __user *)argp);
break;
}
case TIOCINQ: {
struct sk_buff *skb;
long amount = 0L;
/* These two are safe on a single CPU system as only user tasks fiddle here */
if ((skb = skb_peek(&sk->sk_receive_queue)) != NULL)
amount = skb->len;
res = put_user(amount, (int __user *) argp);
break;
}
case SIOCAX25ADDUID: /* Add a uid to the uid/call map table */
case SIOCAX25DELUID: /* Delete a uid from the uid/call map table */
case SIOCAX25GETUID: {
struct sockaddr_ax25 sax25;
if (copy_from_user(&sax25, argp, sizeof(sax25))) {
res = -EFAULT;
break;
}
res = ax25_uid_ioctl(cmd, &sax25);
break;
}
case SIOCAX25NOUID: { /* Set the default policy (default/bar) */
long amount;
if (!capable(CAP_NET_ADMIN)) {
res = -EPERM;
break;
}
if (get_user(amount, (long __user *)argp)) {
res = -EFAULT;
break;
}
if (amount < 0 || amount > AX25_NOUID_BLOCK) {
res = -EINVAL;
break;
}
ax25_uid_policy = amount;
res = 0;
break;
}
case SIOCADDRT:
case SIOCDELRT:
case SIOCAX25OPTRT:
if (!capable(CAP_NET_ADMIN)) {
res = -EPERM;
break;
}
res = ax25_rt_ioctl(cmd, argp);
break;
case SIOCAX25CTLCON:
if (!capable(CAP_NET_ADMIN)) {
res = -EPERM;
break;
}
res = ax25_ctl_ioctl(cmd, argp);
break;
case SIOCAX25GETINFO:
case SIOCAX25GETINFOOLD: {
ax25_cb *ax25 = sk_to_ax25(sk);
struct ax25_info_struct ax25_info;
ax25_info.t1 = ax25->t1 / HZ;
ax25_info.t2 = ax25->t2 / HZ;
ax25_info.t3 = ax25->t3 / HZ;
ax25_info.idle = ax25->idle / (60 * HZ);
ax25_info.n2 = ax25->n2;
ax25_info.t1timer = ax25_display_timer(&ax25->t1timer) / HZ;
ax25_info.t2timer = ax25_display_timer(&ax25->t2timer) / HZ;
ax25_info.t3timer = ax25_display_timer(&ax25->t3timer) / HZ;
ax25_info.idletimer = ax25_display_timer(&ax25->idletimer) / (60 * HZ);
ax25_info.n2count = ax25->n2count;
ax25_info.state = ax25->state;
ax25_info.rcv_q = sk_rmem_alloc_get(sk);
ax25_info.snd_q = sk_wmem_alloc_get(sk);
ax25_info.vs = ax25->vs;
ax25_info.vr = ax25->vr;
ax25_info.va = ax25->va;
ax25_info.vs_max = ax25->vs; /* reserved */
ax25_info.paclen = ax25->paclen;
ax25_info.window = ax25->window;
/* old structure? */
if (cmd == SIOCAX25GETINFOOLD) {
static int warned = 0;
if (!warned) {
printk(KERN_INFO "%s uses old SIOCAX25GETINFO\n",
current->comm);
warned = 1;
}
if (copy_to_user(argp, &ax25_info, sizeof(struct ax25_info_struct_deprecated))) {
res = -EFAULT;
break;
}
} else {
if (copy_to_user(argp, &ax25_info, sizeof(struct ax25_info_struct))) {
res = -EFAULT;
break;
}
}
res = 0;
break;
}
case SIOCAX25ADDFWD:
case SIOCAX25DELFWD: {
struct ax25_fwd_struct ax25_fwd;
if (!capable(CAP_NET_ADMIN)) {
res = -EPERM;
break;
}
if (copy_from_user(&ax25_fwd, argp, sizeof(ax25_fwd))) {
res = -EFAULT;
break;
}
res = ax25_fwd_ioctl(cmd, &ax25_fwd);
break;
}
case SIOCGIFADDR:
case SIOCSIFADDR:
case SIOCGIFDSTADDR:
case SIOCSIFDSTADDR:
case SIOCGIFBRDADDR:
case SIOCSIFBRDADDR:
case SIOCGIFNETMASK:
case SIOCSIFNETMASK:
case SIOCGIFMETRIC:
case SIOCSIFMETRIC:
res = -EINVAL;
break;
default:
res = -ENOIOCTLCMD;
break;
}
release_sock(sk);
return res;
}
#ifdef CONFIG_PROC_FS
static void *ax25_info_start(struct seq_file *seq, loff_t *pos)
__acquires(ax25_list_lock)
{
spin_lock_bh(&ax25_list_lock);
return seq_hlist_start(&ax25_list, *pos);
}
static void *ax25_info_next(struct seq_file *seq, void *v, loff_t *pos)
{
return seq_hlist_next(v, &ax25_list, pos);
}
static void ax25_info_stop(struct seq_file *seq, void *v)
__releases(ax25_list_lock)
{
spin_unlock_bh(&ax25_list_lock);
}
static int ax25_info_show(struct seq_file *seq, void *v)
{
ax25_cb *ax25 = hlist_entry(v, struct ax25_cb, ax25_node);
char buf[11];
int k;
/*
* New format:
* magic dev src_addr dest_addr,digi1,digi2,.. st vs vr va t1 t1 t2 t2 t3 t3 idle idle n2 n2 rtt window paclen Snd-Q Rcv-Q inode
*/
seq_printf(seq, "%p %s %s%s ",
ax25,
ax25->ax25_dev == NULL ? "???" : ax25->ax25_dev->dev->name,
ax2asc(buf, &ax25->source_addr),
ax25->iamdigi ? "*" : "");
seq_printf(seq, "%s", ax2asc(buf, &ax25->dest_addr));
for (k = 0; (ax25->digipeat != NULL) && (k < ax25->digipeat->ndigi); k++) {
seq_printf(seq, ",%s%s",
ax2asc(buf, &ax25->digipeat->calls[k]),
ax25->digipeat->repeated[k] ? "*" : "");
}
seq_printf(seq, " %d %d %d %d %lu %lu %lu %lu %lu %lu %lu %lu %d %d %lu %d %d",
ax25->state,
ax25->vs, ax25->vr, ax25->va,
ax25_display_timer(&ax25->t1timer) / HZ, ax25->t1 / HZ,
ax25_display_timer(&ax25->t2timer) / HZ, ax25->t2 / HZ,
ax25_display_timer(&ax25->t3timer) / HZ, ax25->t3 / HZ,
ax25_display_timer(&ax25->idletimer) / (60 * HZ),
ax25->idle / (60 * HZ),
ax25->n2count, ax25->n2,
ax25->rtt / HZ,
ax25->window,
ax25->paclen);
if (ax25->sk != NULL) {
seq_printf(seq, " %d %d %lu\n",
sk_wmem_alloc_get(ax25->sk),
sk_rmem_alloc_get(ax25->sk),
sock_i_ino(ax25->sk));
} else {
seq_puts(seq, " * * *\n");
}
return 0;
}
static const struct seq_operations ax25_info_seqops = {
.start = ax25_info_start,
.next = ax25_info_next,
.stop = ax25_info_stop,
.show = ax25_info_show,
};
#endif
static const struct net_proto_family ax25_family_ops = {
.family = PF_AX25,
.create = ax25_create,
.owner = THIS_MODULE,
};
static const struct proto_ops ax25_proto_ops = {
.family = PF_AX25,
.owner = THIS_MODULE,
.release = ax25_release,
.bind = ax25_bind,
.connect = ax25_connect,
.socketpair = sock_no_socketpair,
.accept = ax25_accept,
.getname = ax25_getname,
.poll = datagram_poll,
.ioctl = ax25_ioctl,
.gettstamp = sock_gettstamp,
.listen = ax25_listen,
.shutdown = ax25_shutdown,
.setsockopt = ax25_setsockopt,
.getsockopt = ax25_getsockopt,
.sendmsg = ax25_sendmsg,
.recvmsg = ax25_recvmsg,
.mmap = sock_no_mmap,
};
/*
* Called by socket.c on kernel start up
*/
static struct packet_type ax25_packet_type __read_mostly = {
.type = cpu_to_be16(ETH_P_AX25),
.func = ax25_kiss_rcv,
};
static struct notifier_block ax25_dev_notifier = {
.notifier_call = ax25_device_event,
};
static int __init ax25_init(void)
{
int rc = proto_register(&ax25_proto, 0);
if (rc != 0)
goto out;
sock_register(&ax25_family_ops);
dev_add_pack(&ax25_packet_type);
register_netdevice_notifier(&ax25_dev_notifier);
proc_create_seq("ax25_route", 0444, init_net.proc_net, &ax25_rt_seqops);
proc_create_seq("ax25", 0444, init_net.proc_net, &ax25_info_seqops);
proc_create_seq("ax25_calls", 0444, init_net.proc_net,
&ax25_uid_seqops);
out:
return rc;
}
module_init(ax25_init);
MODULE_AUTHOR("Jonathan Naylor G4KLX <[email protected]>");
MODULE_DESCRIPTION("The amateur radio AX.25 link layer protocol");
MODULE_LICENSE("GPL");
MODULE_ALIAS_NETPROTO(PF_AX25);
static void __exit ax25_exit(void)
{
remove_proc_entry("ax25_route", init_net.proc_net);
remove_proc_entry("ax25", init_net.proc_net);
remove_proc_entry("ax25_calls", init_net.proc_net);
unregister_netdevice_notifier(&ax25_dev_notifier);
dev_remove_pack(&ax25_packet_type);
sock_unregister(PF_AX25);
proto_unregister(&ax25_proto);
ax25_rt_free();
ax25_uid_free();
ax25_dev_free();
}
module_exit(ax25_exit);
| linux-master | net/ax25/af_ax25.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
*
* Copyright (C) Alan Cox GW4PTS ([email protected])
* Copyright (C) Jonathan Naylor G4KLX ([email protected])
* Copyright (C) Steven Whitehouse GW7RRM ([email protected])
* Copyright (C) Joerg Reuter DL1BKE ([email protected])
* Copyright (C) Hans-Joachim Hetscher DD8NE ([email protected])
* Copyright (C) Frederic Rible F1OAT ([email protected])
*/
#include <linux/capability.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/timer.h>
#include <linux/in.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/string.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/slab.h>
#include <net/ax25.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/if_arp.h>
#include <linux/skbuff.h>
#include <linux/spinlock.h>
#include <net/sock.h>
#include <linux/uaccess.h>
#include <linux/fcntl.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/seq_file.h>
#include <linux/export.h>
static ax25_route *ax25_route_list;
DEFINE_RWLOCK(ax25_route_lock);
void ax25_rt_device_down(struct net_device *dev)
{
ax25_route *s, *t, *ax25_rt;
write_lock_bh(&ax25_route_lock);
ax25_rt = ax25_route_list;
while (ax25_rt != NULL) {
s = ax25_rt;
ax25_rt = ax25_rt->next;
if (s->dev == dev) {
if (ax25_route_list == s) {
ax25_route_list = s->next;
kfree(s->digipeat);
kfree(s);
} else {
for (t = ax25_route_list; t != NULL; t = t->next) {
if (t->next == s) {
t->next = s->next;
kfree(s->digipeat);
kfree(s);
break;
}
}
}
}
}
write_unlock_bh(&ax25_route_lock);
}
static int __must_check ax25_rt_add(struct ax25_routes_struct *route)
{
ax25_route *ax25_rt;
ax25_dev *ax25_dev;
int i;
if (route->digi_count > AX25_MAX_DIGIS)
return -EINVAL;
ax25_dev = ax25_addr_ax25dev(&route->port_addr);
if (!ax25_dev)
return -EINVAL;
write_lock_bh(&ax25_route_lock);
ax25_rt = ax25_route_list;
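/*
* If a route to this destination already exists on the device,
* replace its digipeater list in place instead of adding a
* duplicate entry.
*/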
while (ax25_rt != NULL) {
if (ax25cmp(&ax25_rt->callsign, &route->dest_addr) == 0 &&
ax25_rt->dev == ax25_dev->dev) {
kfree(ax25_rt->digipeat);
ax25_rt->digipeat = NULL;
if (route->digi_count != 0) {
if ((ax25_rt->digipeat = kmalloc(sizeof(ax25_digi), GFP_ATOMIC)) == NULL) {
write_unlock_bh(&ax25_route_lock);
ax25_dev_put(ax25_dev);
return -ENOMEM;
}
ax25_rt->digipeat->lastrepeat = -1;
ax25_rt->digipeat->ndigi = route->digi_count;
for (i = 0; i < route->digi_count; i++) {
ax25_rt->digipeat->repeated[i] = 0;
ax25_rt->digipeat->calls[i] = route->digi_addr[i];
}
}
write_unlock_bh(&ax25_route_lock);
ax25_dev_put(ax25_dev);
return 0;
}
ax25_rt = ax25_rt->next;
}
if ((ax25_rt = kmalloc(sizeof(ax25_route), GFP_ATOMIC)) == NULL) {
write_unlock_bh(&ax25_route_lock);
ax25_dev_put(ax25_dev);
return -ENOMEM;
}
ax25_rt->callsign = route->dest_addr;
ax25_rt->dev = ax25_dev->dev;
ax25_rt->digipeat = NULL;
ax25_rt->ip_mode = ' ';
if (route->digi_count != 0) {
if ((ax25_rt->digipeat = kmalloc(sizeof(ax25_digi), GFP_ATOMIC)) == NULL) {
write_unlock_bh(&ax25_route_lock);
kfree(ax25_rt);
ax25_dev_put(ax25_dev);
return -ENOMEM;
}
ax25_rt->digipeat->lastrepeat = -1;
ax25_rt->digipeat->ndigi = route->digi_count;
for (i = 0; i < route->digi_count; i++) {
ax25_rt->digipeat->repeated[i] = 0;
ax25_rt->digipeat->calls[i] = route->digi_addr[i];
}
}
ax25_rt->next = ax25_route_list;
ax25_route_list = ax25_rt;
write_unlock_bh(&ax25_route_lock);
ax25_dev_put(ax25_dev);
return 0;
}
void __ax25_put_route(ax25_route *ax25_rt)
{
kfree(ax25_rt->digipeat);
kfree(ax25_rt);
}
static int ax25_rt_del(struct ax25_routes_struct *route)
{
ax25_route *s, *t, *ax25_rt;
ax25_dev *ax25_dev;
if ((ax25_dev = ax25_addr_ax25dev(&route->port_addr)) == NULL)
return -EINVAL;
write_lock_bh(&ax25_route_lock);
ax25_rt = ax25_route_list;
while (ax25_rt != NULL) {
s = ax25_rt;
ax25_rt = ax25_rt->next;
if (s->dev == ax25_dev->dev &&
ax25cmp(&route->dest_addr, &s->callsign) == 0) {
if (ax25_route_list == s) {
ax25_route_list = s->next;
__ax25_put_route(s);
} else {
for (t = ax25_route_list; t != NULL; t = t->next) {
if (t->next == s) {
t->next = s->next;
__ax25_put_route(s);
break;
}
}
}
}
}
write_unlock_bh(&ax25_route_lock);
ax25_dev_put(ax25_dev);
return 0;
}
static int ax25_rt_opt(struct ax25_route_opt_struct *rt_option)
{
ax25_route *ax25_rt;
ax25_dev *ax25_dev;
int err = 0;
if ((ax25_dev = ax25_addr_ax25dev(&rt_option->port_addr)) == NULL)
return -EINVAL;
write_lock_bh(&ax25_route_lock);
ax25_rt = ax25_route_list;
while (ax25_rt != NULL) {
if (ax25_rt->dev == ax25_dev->dev &&
ax25cmp(&rt_option->dest_addr, &ax25_rt->callsign) == 0) {
switch (rt_option->cmd) {
case AX25_SET_RT_IPMODE:
switch (rt_option->arg) {
case ' ':
case 'D':
case 'V':
ax25_rt->ip_mode = rt_option->arg;
break;
default:
err = -EINVAL;
goto out;
}
break;
default:
err = -EINVAL;
goto out;
}
}
ax25_rt = ax25_rt->next;
}
out:
write_unlock_bh(&ax25_route_lock);
ax25_dev_put(ax25_dev);
return err;
}
int ax25_rt_ioctl(unsigned int cmd, void __user *arg)
{
struct ax25_route_opt_struct rt_option;
struct ax25_routes_struct route;
switch (cmd) {
case SIOCADDRT:
if (copy_from_user(&route, arg, sizeof(route)))
return -EFAULT;
return ax25_rt_add(&route);
case SIOCDELRT:
if (copy_from_user(&route, arg, sizeof(route)))
return -EFAULT;
return ax25_rt_del(&route);
case SIOCAX25OPTRT:
if (copy_from_user(&rt_option, arg, sizeof(rt_option)))
return -EFAULT;
return ax25_rt_opt(&rt_option);
default:
return -EINVAL;
}
}
#ifdef CONFIG_PROC_FS
static void *ax25_rt_seq_start(struct seq_file *seq, loff_t *pos)
__acquires(ax25_route_lock)
{
struct ax25_route *ax25_rt;
int i = 1;
read_lock(&ax25_route_lock);
if (*pos == 0)
return SEQ_START_TOKEN;
for (ax25_rt = ax25_route_list; ax25_rt != NULL; ax25_rt = ax25_rt->next) {
if (i == *pos)
return ax25_rt;
++i;
}
return NULL;
}
static void *ax25_rt_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
++*pos;
return (v == SEQ_START_TOKEN) ? ax25_route_list :
((struct ax25_route *) v)->next;
}
static void ax25_rt_seq_stop(struct seq_file *seq, void *v)
__releases(ax25_route_lock)
{
read_unlock(&ax25_route_lock);
}
static int ax25_rt_seq_show(struct seq_file *seq, void *v)
{
char buf[11];
if (v == SEQ_START_TOKEN)
seq_puts(seq, "callsign dev mode digipeaters\n");
else {
struct ax25_route *ax25_rt = v;
const char *callsign;
int i;
if (ax25cmp(&ax25_rt->callsign, &null_ax25_address) == 0)
callsign = "default";
else
callsign = ax2asc(buf, &ax25_rt->callsign);
seq_printf(seq, "%-9s %-4s",
callsign,
ax25_rt->dev ? ax25_rt->dev->name : "???");
switch (ax25_rt->ip_mode) {
case 'V':
seq_puts(seq, " vc");
break;
case 'D':
seq_puts(seq, " dg");
break;
default:
seq_puts(seq, " *");
break;
}
if (ax25_rt->digipeat != NULL)
for (i = 0; i < ax25_rt->digipeat->ndigi; i++)
seq_printf(seq, " %s",
ax2asc(buf, &ax25_rt->digipeat->calls[i]));
seq_puts(seq, "\n");
}
return 0;
}
const struct seq_operations ax25_rt_seqops = {
.start = ax25_rt_seq_start,
.next = ax25_rt_seq_next,
.stop = ax25_rt_seq_stop,
.show = ax25_rt_seq_show,
};
#endif
/*
* Find AX.25 route
*
* Only routes with a reference count of zero can be destroyed.
* Must be called with ax25_route_lock read locked.
*/
ax25_route *ax25_get_route(ax25_address *addr, struct net_device *dev)
{
ax25_route *ax25_spe_rt = NULL;
ax25_route *ax25_def_rt = NULL;
ax25_route *ax25_rt;
/*
* Bind to the physical interface we heard them on, or the default
* route if none is found.
*/
for (ax25_rt = ax25_route_list; ax25_rt != NULL; ax25_rt = ax25_rt->next) {
if (dev == NULL) {
if (ax25cmp(&ax25_rt->callsign, addr) == 0 && ax25_rt->dev != NULL)
ax25_spe_rt = ax25_rt;
if (ax25cmp(&ax25_rt->callsign, &null_ax25_address) == 0 && ax25_rt->dev != NULL)
ax25_def_rt = ax25_rt;
} else {
if (ax25cmp(&ax25_rt->callsign, addr) == 0 && ax25_rt->dev == dev)
ax25_spe_rt = ax25_rt;
if (ax25cmp(&ax25_rt->callsign, &null_ax25_address) == 0 && ax25_rt->dev == dev)
ax25_def_rt = ax25_rt;
}
}
ax25_rt = ax25_def_rt;
if (ax25_spe_rt != NULL)
ax25_rt = ax25_spe_rt;
return ax25_rt;
}
/*
* Adjust path: if you specify a default route and want to connect to
* a target on the digipeater path without having set a specific route
* beforehand, the path has to be truncated from your target onwards.
*/
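/* e.g. digi path D1,D2,D3 with target D2: the loop stops at k = 1, leaving only D1. */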
static inline void ax25_adjust_path(ax25_address *addr, ax25_digi *digipeat)
{
int k;
for (k = 0; k < digipeat->ndigi; k++) {
if (ax25cmp(addr, &digipeat->calls[k]) == 0)
break;
}
digipeat->ndigi = k;
}
/*
* Find which interface to use.
*/
int ax25_rt_autobind(ax25_cb *ax25, ax25_address *addr)
{
ax25_uid_assoc *user;
ax25_route *ax25_rt;
int err = 0;
ax25_route_lock_use();
ax25_rt = ax25_get_route(addr, NULL);
if (!ax25_rt) {
ax25_route_lock_unuse();
return -EHOSTUNREACH;
}
if ((ax25->ax25_dev = ax25_dev_ax25dev(ax25_rt->dev)) == NULL) {
err = -EHOSTUNREACH;
goto put;
}
user = ax25_findbyuid(current_euid());
if (user) {
ax25->source_addr = user->call;
ax25_uid_put(user);
} else {
if (ax25_uid_policy && !capable(CAP_NET_BIND_SERVICE)) {
err = -EPERM;
goto put;
}
ax25->source_addr = *(ax25_address *)ax25->ax25_dev->dev->dev_addr;
}
if (ax25_rt->digipeat != NULL) {
ax25->digipeat = kmemdup(ax25_rt->digipeat, sizeof(ax25_digi),
GFP_ATOMIC);
if (ax25->digipeat == NULL) {
err = -ENOMEM;
goto put;
}
ax25_adjust_path(addr, ax25->digipeat);
}
if (ax25->sk != NULL) {
local_bh_disable();
bh_lock_sock(ax25->sk);
sock_reset_flag(ax25->sk, SOCK_ZAPPED);
bh_unlock_sock(ax25->sk);
local_bh_enable();
}
put:
ax25_route_lock_unuse();
return err;
}
struct sk_buff *ax25_rt_build_path(struct sk_buff *skb, ax25_address *src,
ax25_address *dest, ax25_digi *digi)
{
unsigned char *bp;
int len;
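/*
* Make room for the digipeater addresses in front of the frame,
* reallocating the skb head if the existing headroom is too small.
*/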
len = digi->ndigi * AX25_ADDR_LEN;
if (unlikely(skb_headroom(skb) < len)) {
skb = skb_expand_head(skb, len);
if (!skb) {
printk(KERN_CRIT "AX.25: ax25_rt_build_path - out of memory\n");
return NULL;
}
}
bp = skb_push(skb, len);
ax25_addr_build(bp, src, dest, digi, AX25_COMMAND, AX25_MODULUS);
return skb;
}
/*
* Free all memory associated with routing structures.
*/
void __exit ax25_rt_free(void)
{
ax25_route *s, *ax25_rt = ax25_route_list;
write_lock_bh(&ax25_route_lock);
while (ax25_rt != NULL) {
s = ax25_rt;
ax25_rt = ax25_rt->next;
kfree(s->digipeat);
kfree(s);
}
write_unlock_bh(&ax25_route_lock);
}
| linux-master | net/ax25/ax25_route.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
*
* Copyright (C) Jonathan Naylor G4KLX ([email protected])
*/
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/in.h>
#include <linux/kernel.h>
#include <linux/timer.h>
#include <linux/string.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <net/ax25.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <net/sock.h>
#include <linux/uaccess.h>
#include <linux/fcntl.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
/*
* The following routines are taken from page 170 of the 7th ARRL Computer
* Networking Conference paper, as is the whole state machine.
*/
void ax25_std_nr_error_recovery(ax25_cb *ax25)
{
ax25_std_establish_data_link(ax25);
}
void ax25_std_establish_data_link(ax25_cb *ax25)
{
ax25->condition = 0x00;
ax25->n2count = 0;
if (ax25->modulus == AX25_MODULUS)
ax25_send_control(ax25, AX25_SABM, AX25_POLLON, AX25_COMMAND);
else
ax25_send_control(ax25, AX25_SABME, AX25_POLLON, AX25_COMMAND);
ax25_calculate_t1(ax25);
ax25_stop_idletimer(ax25);
ax25_stop_t3timer(ax25);
ax25_stop_t2timer(ax25);
ax25_start_t1timer(ax25);
}
void ax25_std_transmit_enquiry(ax25_cb *ax25)
{
if (ax25->condition & AX25_COND_OWN_RX_BUSY)
ax25_send_control(ax25, AX25_RNR, AX25_POLLON, AX25_COMMAND);
else
ax25_send_control(ax25, AX25_RR, AX25_POLLON, AX25_COMMAND);
ax25->condition &= ~AX25_COND_ACK_PENDING;
ax25_calculate_t1(ax25);
ax25_start_t1timer(ax25);
}
void ax25_std_enquiry_response(ax25_cb *ax25)
{
if (ax25->condition & AX25_COND_OWN_RX_BUSY)
ax25_send_control(ax25, AX25_RNR, AX25_POLLON, AX25_RESPONSE);
else
ax25_send_control(ax25, AX25_RR, AX25_POLLON, AX25_RESPONSE);
ax25->condition &= ~AX25_COND_ACK_PENDING;
}
void ax25_std_timeout_response(ax25_cb *ax25)
{
if (ax25->condition & AX25_COND_OWN_RX_BUSY)
ax25_send_control(ax25, AX25_RNR, AX25_POLLOFF, AX25_RESPONSE);
else
ax25_send_control(ax25, AX25_RR, AX25_POLLOFF, AX25_RESPONSE);
ax25->condition &= ~AX25_COND_ACK_PENDING;
}
| linux-master | net/ax25/ax25_std_subr.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
*
* Copyright (C) Jonathan Naylor G4KLX ([email protected])
* Copyright (C) Joerg Reuter DL1BKE ([email protected])
*/
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/in.h>
#include <linux/kernel.h>
#include <linux/timer.h>
#include <linux/string.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <net/ax25.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <net/sock.h>
#include <net/tcp_states.h>
#include <linux/uaccess.h>
#include <linux/fcntl.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
/*
* State machine for state 1, Awaiting Connection State.
* The handling of the timer(s) is in file ax25_ds_timer.c.
* Handling of state 0 and connection release is in ax25.c.
*/
static int ax25_ds_state1_machine(ax25_cb *ax25, struct sk_buff *skb, int frametype, int pf, int type)
{
switch (frametype) {
case AX25_SABM:
ax25->modulus = AX25_MODULUS;
ax25->window = ax25->ax25_dev->values[AX25_VALUES_WINDOW];
ax25_send_control(ax25, AX25_UA, pf, AX25_RESPONSE);
break;
case AX25_SABME:
ax25->modulus = AX25_EMODULUS;
ax25->window = ax25->ax25_dev->values[AX25_VALUES_EWINDOW];
ax25_send_control(ax25, AX25_UA, pf, AX25_RESPONSE);
break;
case AX25_DISC:
ax25_send_control(ax25, AX25_DM, pf, AX25_RESPONSE);
break;
case AX25_UA:
ax25_calculate_rtt(ax25);
ax25_stop_t1timer(ax25);
ax25_start_t3timer(ax25);
ax25_start_idletimer(ax25);
ax25->vs = 0;
ax25->va = 0;
ax25->vr = 0;
ax25->state = AX25_STATE_3;
ax25->n2count = 0;
if (ax25->sk != NULL) {
bh_lock_sock(ax25->sk);
ax25->sk->sk_state = TCP_ESTABLISHED;
/*
* For WAIT_SABM connections we will produce an accept
* ready socket here
*/
if (!sock_flag(ax25->sk, SOCK_DEAD))
ax25->sk->sk_state_change(ax25->sk);
bh_unlock_sock(ax25->sk);
}
ax25_dama_on(ax25);
/* according to DK4EG's spec we are required to
* send a RR RESPONSE FINAL NR=0.
*/
ax25_std_enquiry_response(ax25);
break;
case AX25_DM:
if (pf)
ax25_disconnect(ax25, ECONNREFUSED);
break;
default:
if (pf)
ax25_send_control(ax25, AX25_SABM, AX25_POLLON, AX25_COMMAND);
break;
}
return 0;
}
/*
* State machine for state 2, Awaiting Release State.
* The handling of the timer(s) is in file ax25_ds_timer.c
* Handling of state 0 and connection release is in ax25.c.
*/
static int ax25_ds_state2_machine(ax25_cb *ax25, struct sk_buff *skb, int frametype, int pf, int type)
{
switch (frametype) {
case AX25_SABM:
case AX25_SABME:
ax25_send_control(ax25, AX25_DISC, AX25_POLLON, AX25_COMMAND);
ax25_dama_off(ax25);
break;
case AX25_DISC:
ax25_send_control(ax25, AX25_UA, pf, AX25_RESPONSE);
ax25_dama_off(ax25);
ax25_disconnect(ax25, 0);
break;
case AX25_DM:
case AX25_UA:
if (pf) {
ax25_dama_off(ax25);
ax25_disconnect(ax25, 0);
}
break;
case AX25_I:
case AX25_REJ:
case AX25_RNR:
case AX25_RR:
if (pf) {
ax25_send_control(ax25, AX25_DISC, AX25_POLLON, AX25_COMMAND);
ax25_dama_off(ax25);
}
break;
default:
break;
}
return 0;
}
/*
* State machine for state 3, Connected State.
* The handling of the timer(s) is in file ax25_timer.c
* Handling of state 0 and connection release is in ax25.c.
*/
static int ax25_ds_state3_machine(ax25_cb *ax25, struct sk_buff *skb, int frametype, int ns, int nr, int pf, int type)
{
int queued = 0;
switch (frametype) {
case AX25_SABM:
case AX25_SABME:
if (frametype == AX25_SABM) {
ax25->modulus = AX25_MODULUS;
ax25->window = ax25->ax25_dev->values[AX25_VALUES_WINDOW];
} else {
ax25->modulus = AX25_EMODULUS;
ax25->window = ax25->ax25_dev->values[AX25_VALUES_EWINDOW];
}
ax25_send_control(ax25, AX25_UA, pf, AX25_RESPONSE);
ax25_stop_t1timer(ax25);
ax25_start_t3timer(ax25);
ax25_start_idletimer(ax25);
ax25->condition = 0x00;
ax25->vs = 0;
ax25->va = 0;
ax25->vr = 0;
ax25_requeue_frames(ax25);
ax25_dama_on(ax25);
break;
case AX25_DISC:
ax25_send_control(ax25, AX25_UA, pf, AX25_RESPONSE);
ax25_dama_off(ax25);
ax25_disconnect(ax25, 0);
break;
case AX25_DM:
ax25_dama_off(ax25);
ax25_disconnect(ax25, ECONNRESET);
break;
case AX25_RR:
case AX25_RNR:
if (frametype == AX25_RR)
ax25->condition &= ~AX25_COND_PEER_RX_BUSY;
else
ax25->condition |= AX25_COND_PEER_RX_BUSY;
if (ax25_validate_nr(ax25, nr)) {
if (ax25_check_iframes_acked(ax25, nr))
ax25->n2count = 0;
if (type == AX25_COMMAND && pf)
ax25_ds_enquiry_response(ax25);
} else {
ax25_ds_nr_error_recovery(ax25);
ax25->state = AX25_STATE_1;
}
break;
case AX25_REJ:
ax25->condition &= ~AX25_COND_PEER_RX_BUSY;
if (ax25_validate_nr(ax25, nr)) {
if (ax25->va != nr)
ax25->n2count = 0;
ax25_frames_acked(ax25, nr);
ax25_calculate_rtt(ax25);
ax25_stop_t1timer(ax25);
ax25_start_t3timer(ax25);
ax25_requeue_frames(ax25);
if (type == AX25_COMMAND && pf)
ax25_ds_enquiry_response(ax25);
} else {
ax25_ds_nr_error_recovery(ax25);
ax25->state = AX25_STATE_1;
}
break;
case AX25_I:
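/*
* An in-sequence I frame advances V(R) and is passed up the stack;
* an out-of-sequence frame sets the reject condition and is answered
* with an enquiry response carrying the expected N(R).
*/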
if (!ax25_validate_nr(ax25, nr)) {
ax25_ds_nr_error_recovery(ax25);
ax25->state = AX25_STATE_1;
break;
}
if (ax25->condition & AX25_COND_PEER_RX_BUSY) {
ax25_frames_acked(ax25, nr);
ax25->n2count = 0;
} else {
if (ax25_check_iframes_acked(ax25, nr))
ax25->n2count = 0;
}
if (ax25->condition & AX25_COND_OWN_RX_BUSY) {
if (pf)
ax25_ds_enquiry_response(ax25);
break;
}
if (ns == ax25->vr) {
ax25->vr = (ax25->vr + 1) % ax25->modulus;
queued = ax25_rx_iframe(ax25, skb);
if (ax25->condition & AX25_COND_OWN_RX_BUSY)
ax25->vr = ns; /* ax25->vr - 1 */
ax25->condition &= ~AX25_COND_REJECT;
if (pf) {
ax25_ds_enquiry_response(ax25);
} else {
if (!(ax25->condition & AX25_COND_ACK_PENDING)) {
ax25->condition |= AX25_COND_ACK_PENDING;
ax25_start_t2timer(ax25);
}
}
} else {
if (ax25->condition & AX25_COND_REJECT) {
if (pf)
ax25_ds_enquiry_response(ax25);
} else {
ax25->condition |= AX25_COND_REJECT;
ax25_ds_enquiry_response(ax25);
ax25->condition &= ~AX25_COND_ACK_PENDING;
}
}
break;
case AX25_FRMR:
case AX25_ILLEGAL:
ax25_ds_establish_data_link(ax25);
ax25->state = AX25_STATE_1;
break;
default:
break;
}
return queued;
}
/*
* Higher level upcall for a LAPB frame
*/
int ax25_ds_frame_in(ax25_cb *ax25, struct sk_buff *skb, int type)
{
int queued = 0, frametype, ns, nr, pf;
frametype = ax25_decode(ax25, skb, &ns, &nr, &pf);
switch (ax25->state) {
case AX25_STATE_1:
queued = ax25_ds_state1_machine(ax25, skb, frametype, pf, type);
break;
case AX25_STATE_2:
queued = ax25_ds_state2_machine(ax25, skb, frametype, pf, type);
break;
case AX25_STATE_3:
queued = ax25_ds_state3_machine(ax25, skb, frametype, ns, nr, pf, type);
break;
}
return queued;
}
| linux-master | net/ax25/ax25_ds_in.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
*
* Copyright (C) Alan Cox GW4PTS ([email protected])
* Copyright (C) Jonathan Naylor G4KLX ([email protected])
* Copyright (C) Joerg Reuter DL1BKE ([email protected])
* Copyright (C) Frederic Rible F1OAT ([email protected])
*/
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/in.h>
#include <linux/kernel.h>
#include <linux/timer.h>
#include <linux/string.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/slab.h>
#include <net/ax25.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <net/sock.h>
#include <net/tcp_states.h>
#include <linux/uaccess.h>
#include <linux/fcntl.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
/*
* This routine purges all the queues of frames.
*/
void ax25_clear_queues(ax25_cb *ax25)
{
skb_queue_purge(&ax25->write_queue);
skb_queue_purge(&ax25->ack_queue);
skb_queue_purge(&ax25->reseq_queue);
skb_queue_purge(&ax25->frag_queue);
}
/*
* This routine purges the input queue of those frames that have been
* acknowledged. This replaces the boxes labelled "V(a) <- N(r)" on the
* SDL diagram.
*/
void ax25_frames_acked(ax25_cb *ax25, unsigned short nr)
{
struct sk_buff *skb;
/*
* Remove all the ack-ed frames from the ack queue.
*/
if (ax25->va != nr) {
while (skb_peek(&ax25->ack_queue) != NULL && ax25->va != nr) {
skb = skb_dequeue(&ax25->ack_queue);
kfree_skb(skb);
ax25->va = (ax25->va + 1) % ax25->modulus;
}
}
}
void ax25_requeue_frames(ax25_cb *ax25)
{
struct sk_buff *skb;
/*
* Requeue all the un-ack-ed frames on the output queue to be picked
* up by ax25_kick called from the timer. This arrangement handles the
* possibility of an empty output queue.
*/
while ((skb = skb_dequeue_tail(&ax25->ack_queue)) != NULL)
skb_queue_head(&ax25->write_queue, skb);
}
/*
* Validate that the value of nr is between va and vs. Return true or
* false for testing.
*/
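/* e.g. with modulo 8, va = 6 and vs = 1: valid nr values are 6, 7, 0 and 1. */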
int ax25_validate_nr(ax25_cb *ax25, unsigned short nr)
{
unsigned short vc = ax25->va;
while (vc != ax25->vs) {
if (nr == vc)
return 1;
vc = (vc + 1) % ax25->modulus;
}
if (nr == ax25->vs)
return 1;
return 0;
}
/*
* This routine is the centralised routine for parsing the control
* information for the different frame formats.
*/
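/*
* Modulo-8 frames carry a one-byte control field; in modulo-128
* (extended) operation I and S frames use a two-byte control field
* while U frames still use a single byte.
*/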
int ax25_decode(ax25_cb *ax25, struct sk_buff *skb, int *ns, int *nr, int *pf)
{
unsigned char *frame;
int frametype = AX25_ILLEGAL;
frame = skb->data;
*ns = *nr = *pf = 0;
if (ax25->modulus == AX25_MODULUS) {
if ((frame[0] & AX25_S) == 0) {
frametype = AX25_I; /* I frame - carries NR/NS/PF */
*ns = (frame[0] >> 1) & 0x07;
*nr = (frame[0] >> 5) & 0x07;
*pf = frame[0] & AX25_PF;
} else if ((frame[0] & AX25_U) == 1) { /* S frame - take out PF/NR */
frametype = frame[0] & 0x0F;
*nr = (frame[0] >> 5) & 0x07;
*pf = frame[0] & AX25_PF;
} else if ((frame[0] & AX25_U) == 3) { /* U frame - take out PF */
frametype = frame[0] & ~AX25_PF;
*pf = frame[0] & AX25_PF;
}
skb_pull(skb, 1);
} else {
if ((frame[0] & AX25_S) == 0) {
frametype = AX25_I; /* I frame - carries NR/NS/PF */
*ns = (frame[0] >> 1) & 0x7F;
*nr = (frame[1] >> 1) & 0x7F;
*pf = frame[1] & AX25_EPF;
skb_pull(skb, 2);
} else if ((frame[0] & AX25_U) == 1) { /* S frame - take out PF/NR */
frametype = frame[0] & 0x0F;
*nr = (frame[1] >> 1) & 0x7F;
*pf = frame[1] & AX25_EPF;
skb_pull(skb, 2);
} else if ((frame[0] & AX25_U) == 3) { /* U frame - take out PF */
frametype = frame[0] & ~AX25_PF;
*pf = frame[0] & AX25_PF;
skb_pull(skb, 1);
}
}
return frametype;
}
/*
* This routine is called when the HDLC layer internally generates a
* command or response for the remote machine ( eg. RR, UA etc. ).
* Only supervisory or unnumbered frames are processed.
*/
void ax25_send_control(ax25_cb *ax25, int frametype, int poll_bit, int type)
{
struct sk_buff *skb;
unsigned char *dptr;
if ((skb = alloc_skb(ax25->ax25_dev->dev->hard_header_len + 2, GFP_ATOMIC)) == NULL)
return;
skb_reserve(skb, ax25->ax25_dev->dev->hard_header_len);
skb_reset_network_header(skb);
/* Assume a response - address structure for DTE */
if (ax25->modulus == AX25_MODULUS) {
dptr = skb_put(skb, 1);
*dptr = frametype;
*dptr |= (poll_bit) ? AX25_PF : 0;
if ((frametype & AX25_U) == AX25_S) /* S frames carry NR */
*dptr |= (ax25->vr << 5);
} else {
if ((frametype & AX25_U) == AX25_U) {
dptr = skb_put(skb, 1);
*dptr = frametype;
*dptr |= (poll_bit) ? AX25_PF : 0;
} else {
dptr = skb_put(skb, 2);
dptr[0] = frametype;
dptr[1] = (ax25->vr << 1);
dptr[1] |= (poll_bit) ? AX25_EPF : 0;
}
}
ax25_transmit_buffer(ax25, skb, type);
}
/*
* Send a 'DM' to an unknown connection attempt, or an invalid caller.
*
* Note: src here is the sender, thus it's the target of the DM
*/
void ax25_return_dm(struct net_device *dev, ax25_address *src, ax25_address *dest, ax25_digi *digi)
{
struct sk_buff *skb;
char *dptr;
ax25_digi retdigi;
if (dev == NULL)
return;
if ((skb = alloc_skb(dev->hard_header_len + 1, GFP_ATOMIC)) == NULL)
return; /* Next SABM will get DM'd */
skb_reserve(skb, dev->hard_header_len);
skb_reset_network_header(skb);
ax25_digi_invert(digi, &retdigi);
dptr = skb_put(skb, 1);
*dptr = AX25_DM | AX25_PF;
/*
* Do the address ourselves
*/
dptr = skb_push(skb, ax25_addr_size(digi));
dptr += ax25_addr_build(dptr, dest, src, &retdigi, AX25_RESPONSE, AX25_MODULUS);
ax25_queue_xmit(skb, dev);
}
/*
* Exponential backoff for AX.25
*/
void ax25_calculate_t1(ax25_cb *ax25)
{
int n, t = 2;
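/*
* backoff 0: constant T1 = 2 * rtt; 1: linear in the retry count;
* 2: exponential in the retry count, capped at 8 * rtt.
*/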
switch (ax25->backoff) {
case 0:
break;
case 1:
t += 2 * ax25->n2count;
break;
case 2:
for (n = 0; n < ax25->n2count; n++)
t *= 2;
if (t > 8)
t = 8;
break;
}
ax25->t1 = t * ax25->rtt;
}
/*
* Calculate the Round Trip Time
*/
void ax25_calculate_rtt(ax25_cb *ax25)
{
if (ax25->backoff == 0)
return;
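/*
* Exponentially smoothed: 90% old estimate plus 10% of the new sample,
* taken only from a clean (no retransmission) T1 measurement, then
* clamped to the permitted range.
*/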
if (ax25_t1timer_running(ax25) && ax25->n2count == 0)
ax25->rtt = (9 * ax25->rtt + ax25->t1 - ax25_display_timer(&ax25->t1timer)) / 10;
if (ax25->rtt < AX25_T1CLAMPLO)
ax25->rtt = AX25_T1CLAMPLO;
if (ax25->rtt > AX25_T1CLAMPHI)
ax25->rtt = AX25_T1CLAMPHI;
}
void ax25_disconnect(ax25_cb *ax25, int reason)
{
ax25_clear_queues(ax25);
if (reason == ENETUNREACH) {
del_timer_sync(&ax25->timer);
del_timer_sync(&ax25->t1timer);
del_timer_sync(&ax25->t2timer);
del_timer_sync(&ax25->t3timer);
del_timer_sync(&ax25->idletimer);
} else {
if (ax25->sk && !sock_flag(ax25->sk, SOCK_DESTROY))
ax25_stop_heartbeat(ax25);
ax25_stop_t1timer(ax25);
ax25_stop_t2timer(ax25);
ax25_stop_t3timer(ax25);
ax25_stop_idletimer(ax25);
}
ax25->state = AX25_STATE_0;
ax25_link_failed(ax25, reason);
if (ax25->sk != NULL) {
local_bh_disable();
bh_lock_sock(ax25->sk);
ax25->sk->sk_state = TCP_CLOSE;
ax25->sk->sk_err = reason;
ax25->sk->sk_shutdown |= SEND_SHUTDOWN;
if (!sock_flag(ax25->sk, SOCK_DEAD)) {
ax25->sk->sk_state_change(ax25->sk);
sock_set_flag(ax25->sk, SOCK_DEAD);
}
bh_unlock_sock(ax25->sk);
local_bh_enable();
}
}
| linux-master | net/ax25/ax25_subr.c |