Columns:
    python_code: string, lengths 0 to 1.8M
    repo_name: string, 7 distinct values
    file_path: string, lengths 5 to 99
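The three columns above describe each row of the dump that follows: the full text of a source file (here the cells hold C sources despite the column name), the repository it was taken from, and its path inside that repository. As a minimal sketch of how a table with this schema might be consumed, assuming it is published as a Hugging Face dataset, the rows can be iterated as below; the identifier "example/kernel-code-dump" is a hypothetical placeholder, not a real dataset name.

    # Minimal sketch: the dataset identifier is a placeholder for any dataset
    # with columns python_code, repo_name, file_path as described above.
    from datasets import load_dataset

    ds = load_dataset("example/kernel-code-dump", split="train")

    for row in ds.select(range(3)):        # peek at the first few rows
        code = row["python_code"]          # whole file contents as one string
        print(row["repo_name"], row["file_path"], len(code), "chars")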
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Host1x init for Tegra234 SoCs
 *
 * Copyright (c) 2022 NVIDIA Corporation.
 */

/* include hw specification */
#include "host1x08.h"
#include "host1x08_hardware.h"

/* include code */
#define HOST1X_HW 8

#include "cdma_hw.c"
#include "channel_hw.c"
#include "debug_hw.c"
#include "intr_hw.c"
#include "syncpt_hw.c"

#include "../dev.h"

int host1x08_init(struct host1x *host)
{
        host->channel_op = &host1x_channel_ops;
        host->cdma_op = &host1x_cdma_ops;
        host->cdma_pb_op = &host1x_pushbuffer_ops;
        host->syncpt_op = &host1x_syncpt_ops;
        host->intr_op = &host1x_intr_ops;
        host->debug_op = &host1x_debug_ops;

        return 0;
}
linux-master
drivers/gpu/host1x/hw/host1x08.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Host1x init for Tegra186 SoCs
 *
 * Copyright (c) 2017 NVIDIA Corporation.
 */

/* include hw specification */
#include "host1x06.h"
#include "host1x06_hardware.h"

/* include code */
#define HOST1X_HW 6

#include "cdma_hw.c"
#include "channel_hw.c"
#include "debug_hw.c"
#include "intr_hw.c"
#include "syncpt_hw.c"

#include "../dev.h"

int host1x06_init(struct host1x *host)
{
        host->channel_op = &host1x_channel_ops;
        host->cdma_op = &host1x_cdma_ops;
        host->cdma_pb_op = &host1x_pushbuffer_ops;
        host->syncpt_op = &host1x_syncpt_ops;
        host->intr_op = &host1x_intr_ops;
        host->debug_op = &host1x_debug_ops;

        return 0;
}
linux-master
drivers/gpu/host1x/hw/host1x06.c
// SPDX-License-Identifier: GPL-2.0-only /* * Copyright (C) 2010 Google, Inc. * Author: Erik Gilling <[email protected]> * * Copyright (C) 2011-2013 NVIDIA Corporation */ #include "../dev.h" #include "../debug.h" #include "../cdma.h" #include "../channel.h" #define HOST1X_DEBUG_MAX_PAGE_OFFSET 102400 enum { HOST1X_OPCODE_SETCLASS = 0x00, HOST1X_OPCODE_INCR = 0x01, HOST1X_OPCODE_NONINCR = 0x02, HOST1X_OPCODE_MASK = 0x03, HOST1X_OPCODE_IMM = 0x04, HOST1X_OPCODE_RESTART = 0x05, HOST1X_OPCODE_GATHER = 0x06, HOST1X_OPCODE_SETSTRMID = 0x07, HOST1X_OPCODE_SETAPPID = 0x08, HOST1X_OPCODE_SETPYLD = 0x09, HOST1X_OPCODE_INCR_W = 0x0a, HOST1X_OPCODE_NONINCR_W = 0x0b, HOST1X_OPCODE_GATHER_W = 0x0c, HOST1X_OPCODE_RESTART_W = 0x0d, HOST1X_OPCODE_EXTEND = 0x0e, }; enum { HOST1X_OPCODE_EXTEND_ACQUIRE_MLOCK = 0x00, HOST1X_OPCODE_EXTEND_RELEASE_MLOCK = 0x01, }; #define INVALID_PAYLOAD 0xffffffff static unsigned int show_channel_command(struct output *o, u32 val, u32 *payload) { unsigned int mask, subop, num, opcode; opcode = val >> 28; switch (opcode) { case HOST1X_OPCODE_SETCLASS: mask = val & 0x3f; if (mask) { host1x_debug_cont(o, "SETCL(class=%03x, offset=%03x, mask=%02x, [", val >> 6 & 0x3ff, val >> 16 & 0xfff, mask); return hweight8(mask); } host1x_debug_cont(o, "SETCL(class=%03x)\n", val >> 6 & 0x3ff); return 0; case HOST1X_OPCODE_INCR: num = val & 0xffff; host1x_debug_cont(o, "INCR(offset=%03x, [", val >> 16 & 0xfff); if (!num) host1x_debug_cont(o, "])\n"); return num; case HOST1X_OPCODE_NONINCR: num = val & 0xffff; host1x_debug_cont(o, "NONINCR(offset=%03x, [", val >> 16 & 0xfff); if (!num) host1x_debug_cont(o, "])\n"); return num; case HOST1X_OPCODE_MASK: mask = val & 0xffff; host1x_debug_cont(o, "MASK(offset=%03x, mask=%03x, [", val >> 16 & 0xfff, mask); if (!mask) host1x_debug_cont(o, "])\n"); return hweight16(mask); case HOST1X_OPCODE_IMM: host1x_debug_cont(o, "IMM(offset=%03x, data=%03x)\n", val >> 16 & 0xfff, val & 0xffff); return 0; case HOST1X_OPCODE_RESTART: host1x_debug_cont(o, "RESTART(offset=%08x)\n", val << 4); return 0; case HOST1X_OPCODE_GATHER: host1x_debug_cont(o, "GATHER(offset=%03x, insert=%d, type=%d, count=%04x, addr=[", val >> 16 & 0xfff, val >> 15 & 0x1, val >> 14 & 0x1, val & 0x3fff); return 1; #if HOST1X_HW >= 6 case HOST1X_OPCODE_SETSTRMID: host1x_debug_cont(o, "SETSTRMID(offset=%06x)\n", val & 0x3fffff); return 0; case HOST1X_OPCODE_SETAPPID: host1x_debug_cont(o, "SETAPPID(appid=%02x)\n", val & 0xff); return 0; case HOST1X_OPCODE_SETPYLD: *payload = val & 0xffff; host1x_debug_cont(o, "SETPYLD(data=%04x)\n", *payload); return 0; case HOST1X_OPCODE_INCR_W: case HOST1X_OPCODE_NONINCR_W: host1x_debug_cont(o, "%s(offset=%06x, ", opcode == HOST1X_OPCODE_INCR_W ? 
"INCR_W" : "NONINCR_W", val & 0x3fffff); if (*payload == 0) { host1x_debug_cont(o, "[])\n"); return 0; } else if (*payload == INVALID_PAYLOAD) { host1x_debug_cont(o, "unknown)\n"); return 0; } else { host1x_debug_cont(o, "["); return *payload; } case HOST1X_OPCODE_GATHER_W: host1x_debug_cont(o, "GATHER_W(count=%04x, addr=[", val & 0x3fff); return 2; #endif case HOST1X_OPCODE_EXTEND: subop = val >> 24 & 0xf; if (subop == HOST1X_OPCODE_EXTEND_ACQUIRE_MLOCK) host1x_debug_cont(o, "ACQUIRE_MLOCK(index=%d)\n", val & 0xff); else if (subop == HOST1X_OPCODE_EXTEND_RELEASE_MLOCK) host1x_debug_cont(o, "RELEASE_MLOCK(index=%d)\n", val & 0xff); else host1x_debug_cont(o, "EXTEND_UNKNOWN(%08x)\n", val); return 0; default: host1x_debug_cont(o, "UNKNOWN\n"); return 0; } } static void show_gather(struct output *o, dma_addr_t phys_addr, unsigned int words, struct host1x_cdma *cdma, dma_addr_t pin_addr, u32 *map_addr) { /* Map dmaget cursor to corresponding mem handle */ u32 offset = phys_addr - pin_addr; unsigned int data_count = 0, i; u32 payload = INVALID_PAYLOAD; /* * Sometimes we're given different hardware address to the same * page - in these cases the offset will get an invalid number and * we just have to bail out. */ if (offset > HOST1X_DEBUG_MAX_PAGE_OFFSET) { host1x_debug_output(o, "[address mismatch]\n"); return; } for (i = 0; i < words; i++) { dma_addr_t addr = phys_addr + i * 4; u32 val = *(map_addr + offset / 4 + i); if (!data_count) { host1x_debug_output(o, " %pad: %08x: ", &addr, val); data_count = show_channel_command(o, val, &payload); } else { host1x_debug_cont(o, "%08x%s", val, data_count > 1 ? ", " : "])\n"); data_count--; } } } static void show_channel_gathers(struct output *o, struct host1x_cdma *cdma) { struct push_buffer *pb = &cdma->push_buffer; struct host1x_job *job; list_for_each_entry(job, &cdma->sync_queue, list) { unsigned int i; host1x_debug_output(o, "JOB, syncpt %u: %u timeout: %u num_slots: %u num_handles: %u\n", job->syncpt->id, job->syncpt_end, job->timeout, job->num_slots, job->num_unpins); show_gather(o, pb->dma + job->first_get, job->num_slots * 2, cdma, pb->dma + job->first_get, pb->mapped + job->first_get); for (i = 0; i < job->num_cmds; i++) { struct host1x_job_gather *g; u32 *mapped; if (job->cmds[i].is_wait) continue; g = &job->cmds[i].gather; if (job->gather_copy_mapped) mapped = (u32 *)job->gather_copy_mapped; else mapped = host1x_bo_mmap(g->bo); if (!mapped) { host1x_debug_output(o, "[could not mmap]\n"); continue; } host1x_debug_output(o, " GATHER at %pad+%#x, %d words\n", &g->base, g->offset, g->words); show_gather(o, g->base + g->offset, g->words, cdma, g->base, mapped); if (!job->gather_copy_mapped) host1x_bo_munmap(g->bo, mapped); } } } #if HOST1X_HW >= 6 #include "debug_hw_1x06.c" #else #include "debug_hw_1x01.c" #endif static const struct host1x_debug_ops host1x_debug_ops = { .show_channel_cdma = host1x_debug_show_channel_cdma, .show_channel_fifo = host1x_debug_show_channel_fifo, .show_mlocks = host1x_debug_show_mlocks, };
linux-master
drivers/gpu/host1x/hw/debug_hw.c
// SPDX-License-Identifier: GPL-2.0-only /* * Copyright (C) 2010 Google, Inc. * Author: Erik Gilling <[email protected]> * * Copyright (C) 2011-2017 NVIDIA Corporation */ #include "../dev.h" #include "../debug.h" #include "../cdma.h" #include "../channel.h" static void host1x_debug_show_channel_cdma(struct host1x *host, struct host1x_channel *ch, struct output *o) { struct host1x_cdma *cdma = &ch->cdma; dma_addr_t dmastart = 0, dmaend = 0; u32 dmaput, dmaget, dmactrl; u32 offset, class; u32 ch_stat; #if defined(CONFIG_ARCH_DMA_ADDR_T_64BIT) && HOST1X_HW >= 6 dmastart = host1x_ch_readl(ch, HOST1X_CHANNEL_DMASTART_HI); dmastart <<= 32; #endif dmastart |= host1x_ch_readl(ch, HOST1X_CHANNEL_DMASTART); #if defined(CONFIG_ARCH_DMA_ADDR_T_64BIT) && HOST1X_HW >= 6 dmaend = host1x_ch_readl(ch, HOST1X_CHANNEL_DMAEND_HI); dmaend <<= 32; #endif dmaend |= host1x_ch_readl(ch, HOST1X_CHANNEL_DMAEND); dmaput = host1x_ch_readl(ch, HOST1X_CHANNEL_DMAPUT); dmaget = host1x_ch_readl(ch, HOST1X_CHANNEL_DMAGET); dmactrl = host1x_ch_readl(ch, HOST1X_CHANNEL_DMACTRL); offset = host1x_ch_readl(ch, HOST1X_CHANNEL_CMDP_OFFSET); class = host1x_ch_readl(ch, HOST1X_CHANNEL_CMDP_CLASS); ch_stat = host1x_ch_readl(ch, HOST1X_CHANNEL_CHANNELSTAT); host1x_debug_output(o, "%u-%s: ", ch->id, dev_name(ch->dev)); if (dmactrl & HOST1X_CHANNEL_DMACTRL_DMASTOP || !ch->cdma.push_buffer.mapped) { host1x_debug_output(o, "inactive\n\n"); return; } if (class == HOST1X_CLASS_HOST1X && offset == HOST1X_UCLASS_WAIT_SYNCPT) host1x_debug_output(o, "waiting on syncpt\n"); else host1x_debug_output(o, "active class %02x, offset %04x\n", class, offset); host1x_debug_output(o, "DMASTART %pad, DMAEND %pad\n", &dmastart, &dmaend); host1x_debug_output(o, "DMAPUT %08x DMAGET %08x DMACTL %08x\n", dmaput, dmaget, dmactrl); host1x_debug_output(o, "CHANNELSTAT %02x\n", ch_stat); show_channel_gathers(o, cdma); host1x_debug_output(o, "\n"); } static void host1x_debug_show_channel_fifo(struct host1x *host, struct host1x_channel *ch, struct output *o) { #if HOST1X_HW <= 6 u32 rd_ptr, wr_ptr, start, end; u32 payload = INVALID_PAYLOAD; unsigned int data_count = 0; #endif u32 val; host1x_debug_output(o, "%u: fifo:\n", ch->id); val = host1x_ch_readl(ch, HOST1X_CHANNEL_CMDFIFO_STAT); host1x_debug_output(o, "CMDFIFO_STAT %08x\n", val); if (val & HOST1X_CHANNEL_CMDFIFO_STAT_EMPTY) { host1x_debug_output(o, "[empty]\n"); return; } val = host1x_ch_readl(ch, HOST1X_CHANNEL_CMDFIFO_RDATA); host1x_debug_output(o, "CMDFIFO_RDATA %08x\n", val); #if HOST1X_HW <= 6 /* Peek pointer values are invalid during SLCG, so disable it */ host1x_hypervisor_writel(host, 0x1, HOST1X_HV_ICG_EN_OVERRIDE); val = 0; val |= HOST1X_HV_CMDFIFO_PEEK_CTRL_ENABLE; val |= HOST1X_HV_CMDFIFO_PEEK_CTRL_CHANNEL(ch->id); host1x_hypervisor_writel(host, val, HOST1X_HV_CMDFIFO_PEEK_CTRL); val = host1x_hypervisor_readl(host, HOST1X_HV_CMDFIFO_PEEK_PTRS); rd_ptr = HOST1X_HV_CMDFIFO_PEEK_PTRS_RD_PTR_V(val); wr_ptr = HOST1X_HV_CMDFIFO_PEEK_PTRS_WR_PTR_V(val); val = host1x_hypervisor_readl(host, HOST1X_HV_CMDFIFO_SETUP(ch->id)); start = HOST1X_HV_CMDFIFO_SETUP_BASE_V(val); end = HOST1X_HV_CMDFIFO_SETUP_LIMIT_V(val); do { val = 0; val |= HOST1X_HV_CMDFIFO_PEEK_CTRL_ENABLE; val |= HOST1X_HV_CMDFIFO_PEEK_CTRL_CHANNEL(ch->id); val |= HOST1X_HV_CMDFIFO_PEEK_CTRL_ADDR(rd_ptr); host1x_hypervisor_writel(host, val, HOST1X_HV_CMDFIFO_PEEK_CTRL); val = host1x_hypervisor_readl(host, HOST1X_HV_CMDFIFO_PEEK_READ); if (!data_count) { host1x_debug_output(o, "%03x 0x%08x: ", rd_ptr - start, val); data_count = 
show_channel_command(o, val, &payload); } else { host1x_debug_cont(o, "%08x%s", val, data_count > 1 ? ", " : "])\n"); data_count--; } if (rd_ptr == end) rd_ptr = start; else rd_ptr++; } while (rd_ptr != wr_ptr); if (data_count) host1x_debug_cont(o, ", ...])\n"); host1x_debug_output(o, "\n"); host1x_hypervisor_writel(host, 0x0, HOST1X_HV_CMDFIFO_PEEK_CTRL); host1x_hypervisor_writel(host, 0x0, HOST1X_HV_ICG_EN_OVERRIDE); #endif } static void host1x_debug_show_mlocks(struct host1x *host, struct output *o) { /* TODO */ }
linux-master
drivers/gpu/host1x/hw/debug_hw_1x06.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Host1x init for Tegra194 SoCs
 *
 * Copyright (c) 2018 NVIDIA Corporation.
 */

/* include hw specification */
#include "host1x07.h"
#include "host1x07_hardware.h"

/* include code */
#define HOST1X_HW 7

#include "cdma_hw.c"
#include "channel_hw.c"
#include "debug_hw.c"
#include "intr_hw.c"
#include "syncpt_hw.c"

#include "../dev.h"

int host1x07_init(struct host1x *host)
{
        host->channel_op = &host1x_channel_ops;
        host->cdma_op = &host1x_cdma_ops;
        host->cdma_pb_op = &host1x_pushbuffer_ops;
        host->syncpt_op = &host1x_syncpt_ops;
        host->intr_op = &host1x_intr_ops;
        host->debug_op = &host1x_debug_ops;

        return 0;
}
linux-master
drivers/gpu/host1x/hw/host1x07.c
// SPDX-License-Identifier: GPL-2.0-only /* * Tegra host1x Channel * * Copyright (c) 2010-2013, NVIDIA Corporation. */ #include <linux/host1x.h> #include <linux/iommu.h> #include <linux/slab.h> #include <trace/events/host1x.h> #include "../channel.h" #include "../dev.h" #include "../intr.h" #include "../job.h" #define TRACE_MAX_LENGTH 128U static void trace_write_gather(struct host1x_cdma *cdma, struct host1x_bo *bo, u32 offset, u32 words) { struct device *dev = cdma_to_channel(cdma)->dev; void *mem = NULL; if (host1x_debug_trace_cmdbuf) mem = host1x_bo_mmap(bo); if (mem) { u32 i; /* * Write in batches of 128 as there seems to be a limit * of how much you can output to ftrace at once. */ for (i = 0; i < words; i += TRACE_MAX_LENGTH) { u32 num_words = min(words - i, TRACE_MAX_LENGTH); offset += i * sizeof(u32); trace_host1x_cdma_push_gather(dev_name(dev), bo, num_words, offset, mem); } host1x_bo_munmap(bo, mem); } } static void submit_wait(struct host1x_job *job, u32 id, u32 threshold, u32 next_class) { struct host1x_cdma *cdma = &job->channel->cdma; #if HOST1X_HW >= 6 u32 stream_id; /* * If a memory context has been set, use it. Otherwise * (if context isolation is disabled) use the engine's * firmware stream ID. */ if (job->memory_context) stream_id = job->memory_context->stream_id; else stream_id = job->engine_fallback_streamid; host1x_cdma_push_wide(cdma, host1x_opcode_setclass( HOST1X_CLASS_HOST1X, HOST1X_UCLASS_LOAD_SYNCPT_PAYLOAD_32, /* WAIT_SYNCPT_32 is at SYNCPT_PAYLOAD_32+2 */ BIT(0) | BIT(2) ), threshold, id, HOST1X_OPCODE_NOP ); host1x_cdma_push_wide(&job->channel->cdma, host1x_opcode_setclass(job->class, 0, 0), host1x_opcode_setpayload(stream_id), host1x_opcode_setstreamid(job->engine_streamid_offset / 4), HOST1X_OPCODE_NOP); #elif HOST1X_HW >= 2 host1x_cdma_push_wide(cdma, host1x_opcode_setclass( HOST1X_CLASS_HOST1X, HOST1X_UCLASS_LOAD_SYNCPT_PAYLOAD_32, /* WAIT_SYNCPT_32 is at SYNCPT_PAYLOAD_32+2 */ BIT(0) | BIT(2) ), threshold, id, host1x_opcode_setclass(next_class, 0, 0) ); #else /* TODO add waitchk or use waitbases or other mitigation */ host1x_cdma_push(cdma, host1x_opcode_setclass( HOST1X_CLASS_HOST1X, host1x_uclass_wait_syncpt_r(), BIT(0) ), host1x_class_host_wait_syncpt(id, threshold) ); host1x_cdma_push(cdma, host1x_opcode_setclass(next_class, 0, 0), HOST1X_OPCODE_NOP ); #endif } static void submit_gathers(struct host1x_job *job, u32 job_syncpt_base) { struct host1x_cdma *cdma = &job->channel->cdma; #if HOST1X_HW < 6 struct device *dev = job->channel->dev; #endif unsigned int i; u32 threshold; for (i = 0; i < job->num_cmds; i++) { struct host1x_job_cmd *cmd = &job->cmds[i]; if (cmd->is_wait) { if (cmd->wait.relative) threshold = job_syncpt_base + cmd->wait.threshold; else threshold = cmd->wait.threshold; submit_wait(job, cmd->wait.id, threshold, cmd->wait.next_class); } else { struct host1x_job_gather *g = &cmd->gather; dma_addr_t addr = g->base + g->offset; u32 op2, op3; op2 = lower_32_bits(addr); op3 = upper_32_bits(addr); trace_write_gather(cdma, g->bo, g->offset, g->words); if (op3 != 0) { #if HOST1X_HW >= 6 u32 op1 = host1x_opcode_gather_wide(g->words); u32 op4 = HOST1X_OPCODE_NOP; host1x_cdma_push_wide(cdma, op1, op2, op3, op4); #else dev_err(dev, "invalid gather for push buffer %pad\n", &addr); continue; #endif } else { u32 op1 = host1x_opcode_gather(g->words); host1x_cdma_push(cdma, op1, op2); } } } } static inline void synchronize_syncpt_base(struct host1x_job *job) { struct host1x_syncpt *sp = job->syncpt; unsigned int id; u32 value; value = 
host1x_syncpt_read_max(sp); id = sp->base->id; host1x_cdma_push(&job->channel->cdma, host1x_opcode_setclass(HOST1X_CLASS_HOST1X, HOST1X_UCLASS_LOAD_SYNCPT_BASE, 1), HOST1X_UCLASS_LOAD_SYNCPT_BASE_BASE_INDX_F(id) | HOST1X_UCLASS_LOAD_SYNCPT_BASE_VALUE_F(value)); } static void host1x_channel_set_streamid(struct host1x_channel *channel) { #if HOST1X_HW >= 6 u32 stream_id; if (!tegra_dev_iommu_get_stream_id(channel->dev->parent, &stream_id)) stream_id = TEGRA_STREAM_ID_BYPASS; host1x_ch_writel(channel, stream_id, HOST1X_CHANNEL_SMMU_STREAMID); #endif } static void host1x_enable_gather_filter(struct host1x_channel *ch) { #if HOST1X_HW >= 6 struct host1x *host = dev_get_drvdata(ch->dev->parent); u32 val; if (!host->hv_regs) return; val = host1x_hypervisor_readl( host, HOST1X_HV_CH_KERNEL_FILTER_GBUFFER(ch->id / 32)); val |= BIT(ch->id % 32); host1x_hypervisor_writel( host, val, HOST1X_HV_CH_KERNEL_FILTER_GBUFFER(ch->id / 32)); #elif HOST1X_HW >= 4 host1x_ch_writel(ch, HOST1X_CHANNEL_CHANNELCTRL_KERNEL_FILTER_GBUFFER(1), HOST1X_CHANNEL_CHANNELCTRL); #endif } static void channel_program_cdma(struct host1x_job *job) { struct host1x_cdma *cdma = &job->channel->cdma; struct host1x_syncpt *sp = job->syncpt; #if HOST1X_HW >= 6 u32 fence; /* Enter engine class with invalid stream ID. */ host1x_cdma_push_wide(cdma, host1x_opcode_acquire_mlock(job->class), host1x_opcode_setclass(job->class, 0, 0), host1x_opcode_setpayload(0), host1x_opcode_setstreamid(job->engine_streamid_offset / 4)); /* Before switching stream ID to real stream ID, ensure engine is idle. */ fence = host1x_syncpt_incr_max(sp, 1); host1x_cdma_push(&job->channel->cdma, host1x_opcode_nonincr(HOST1X_UCLASS_INCR_SYNCPT, 1), HOST1X_UCLASS_INCR_SYNCPT_INDX_F(job->syncpt->id) | HOST1X_UCLASS_INCR_SYNCPT_COND_F(4)); submit_wait(job, job->syncpt->id, fence, job->class); /* Submit work. */ job->syncpt_end = host1x_syncpt_incr_max(sp, job->syncpt_incrs); submit_gathers(job, job->syncpt_end - job->syncpt_incrs); /* Before releasing MLOCK, ensure engine is idle again. */ fence = host1x_syncpt_incr_max(sp, 1); host1x_cdma_push(&job->channel->cdma, host1x_opcode_nonincr(HOST1X_UCLASS_INCR_SYNCPT, 1), HOST1X_UCLASS_INCR_SYNCPT_INDX_F(job->syncpt->id) | HOST1X_UCLASS_INCR_SYNCPT_COND_F(4)); submit_wait(job, job->syncpt->id, fence, job->class); /* Release MLOCK. */ host1x_cdma_push(cdma, HOST1X_OPCODE_NOP, host1x_opcode_release_mlock(job->class)); #else if (job->serialize) { /* * Force serialization by inserting a host wait for the * previous job to finish before this one can commence. */ host1x_cdma_push(cdma, host1x_opcode_setclass(HOST1X_CLASS_HOST1X, host1x_uclass_wait_syncpt_r(), 1), host1x_class_host_wait_syncpt(job->syncpt->id, host1x_syncpt_read_max(sp))); } /* Synchronize base register to allow using it for relative waiting */ if (sp->base) synchronize_syncpt_base(job); /* add a setclass for modules that require it */ if (job->class) host1x_cdma_push(cdma, host1x_opcode_setclass(job->class, 0, 0), HOST1X_OPCODE_NOP); job->syncpt_end = host1x_syncpt_incr_max(sp, job->syncpt_incrs); submit_gathers(job, job->syncpt_end - job->syncpt_incrs); #endif } static void job_complete_callback(struct dma_fence *fence, struct dma_fence_cb *cb) { struct host1x_job *job = container_of(cb, struct host1x_job, fence_cb); /* Schedules CDMA update. 
*/ host1x_cdma_update(&job->channel->cdma); } static int channel_submit(struct host1x_job *job) { struct host1x_channel *ch = job->channel; struct host1x_syncpt *sp = job->syncpt; u32 prev_max = 0; u32 syncval; int err; struct host1x *host = dev_get_drvdata(ch->dev->parent); trace_host1x_channel_submit(dev_name(ch->dev), job->num_cmds, job->num_relocs, job->syncpt->id, job->syncpt_incrs); /* before error checks, return current max */ prev_max = job->syncpt_end = host1x_syncpt_read_max(sp); /* get submit lock */ err = mutex_lock_interruptible(&ch->submitlock); if (err) return err; host1x_channel_set_streamid(ch); host1x_enable_gather_filter(ch); host1x_hw_syncpt_assign_to_channel(host, sp, ch); /* begin a CDMA submit */ err = host1x_cdma_begin(&ch->cdma, job); if (err) { mutex_unlock(&ch->submitlock); return err; } channel_program_cdma(job); syncval = host1x_syncpt_read_max(sp); /* * Create fence before submitting job to HW to avoid job completing * before the fence is set up. */ job->fence = host1x_fence_create(sp, syncval, true); if (WARN(IS_ERR(job->fence), "Failed to create submit complete fence")) { job->fence = NULL; } else { err = dma_fence_add_callback(job->fence, &job->fence_cb, job_complete_callback); } /* end CDMA submit & stash pinned hMems into sync queue */ host1x_cdma_end(&ch->cdma, job); trace_host1x_channel_submitted(dev_name(ch->dev), prev_max, syncval); mutex_unlock(&ch->submitlock); if (err == -ENOENT) host1x_cdma_update(&ch->cdma); else WARN(err, "Failed to set submit complete interrupt"); return 0; } static int host1x_channel_init(struct host1x_channel *ch, struct host1x *dev, unsigned int index) { #if HOST1X_HW < 6 ch->regs = dev->regs + index * 0x4000; #else ch->regs = dev->regs + index * 0x100; #endif return 0; } static const struct host1x_channel_ops host1x_channel_ops = { .init = host1x_channel_init, .submit = channel_submit, };
linux-master
drivers/gpu/host1x/hw/channel_hw.c
// SPDX-License-Identifier: GPL-2.0-only /* * Copyright (C) 2010 Google, Inc. * Author: Erik Gilling <[email protected]> * * Copyright (C) 2011-2013 NVIDIA Corporation */ #include "../dev.h" #include "../debug.h" #include "../cdma.h" #include "../channel.h" static void host1x_debug_show_channel_cdma(struct host1x *host, struct host1x_channel *ch, struct output *o) { struct host1x_cdma *cdma = &ch->cdma; dma_addr_t dmastart, dmaend; u32 dmaput, dmaget, dmactrl; u32 cbstat, cbread; u32 val, base, baseval; dmastart = host1x_ch_readl(ch, HOST1X_CHANNEL_DMASTART); dmaend = host1x_ch_readl(ch, HOST1X_CHANNEL_DMAEND); dmaput = host1x_ch_readl(ch, HOST1X_CHANNEL_DMAPUT); dmaget = host1x_ch_readl(ch, HOST1X_CHANNEL_DMAGET); dmactrl = host1x_ch_readl(ch, HOST1X_CHANNEL_DMACTRL); cbread = host1x_sync_readl(host, HOST1X_SYNC_CBREAD(ch->id)); cbstat = host1x_sync_readl(host, HOST1X_SYNC_CBSTAT(ch->id)); host1x_debug_output(o, "%u-%s: ", ch->id, dev_name(ch->dev)); if (HOST1X_CHANNEL_DMACTRL_DMASTOP_V(dmactrl) || !ch->cdma.push_buffer.mapped) { host1x_debug_output(o, "inactive\n\n"); return; } if (HOST1X_SYNC_CBSTAT_CBCLASS_V(cbstat) == HOST1X_CLASS_HOST1X && HOST1X_SYNC_CBSTAT_CBOFFSET_V(cbstat) == HOST1X_UCLASS_WAIT_SYNCPT) host1x_debug_output(o, "waiting on syncpt %d val %d\n", cbread >> 24, cbread & 0xffffff); else if (HOST1X_SYNC_CBSTAT_CBCLASS_V(cbstat) == HOST1X_CLASS_HOST1X && HOST1X_SYNC_CBSTAT_CBOFFSET_V(cbstat) == HOST1X_UCLASS_WAIT_SYNCPT_BASE) { base = (cbread >> 16) & 0xff; baseval = host1x_sync_readl(host, HOST1X_SYNC_SYNCPT_BASE(base)); val = cbread & 0xffff; host1x_debug_output(o, "waiting on syncpt %d val %d (base %d = %d; offset = %d)\n", cbread >> 24, baseval + val, base, baseval, val); } else host1x_debug_output(o, "active class %02x, offset %04x, val %08x\n", HOST1X_SYNC_CBSTAT_CBCLASS_V(cbstat), HOST1X_SYNC_CBSTAT_CBOFFSET_V(cbstat), cbread); host1x_debug_output(o, "DMASTART %pad, DMAEND %pad\n", &dmastart, &dmaend); host1x_debug_output(o, "DMAPUT %08x DMAGET %08x DMACTL %08x\n", dmaput, dmaget, dmactrl); host1x_debug_output(o, "CBREAD %08x CBSTAT %08x\n", cbread, cbstat); show_channel_gathers(o, cdma); host1x_debug_output(o, "\n"); } static void host1x_debug_show_channel_fifo(struct host1x *host, struct host1x_channel *ch, struct output *o) { u32 val, rd_ptr, wr_ptr, start, end; unsigned int data_count = 0; host1x_debug_output(o, "%u: fifo:\n", ch->id); val = host1x_ch_readl(ch, HOST1X_CHANNEL_FIFOSTAT); host1x_debug_output(o, "FIFOSTAT %08x\n", val); if (HOST1X_CHANNEL_FIFOSTAT_CFEMPTY_V(val)) { host1x_debug_output(o, "[empty]\n"); return; } host1x_sync_writel(host, 0x0, HOST1X_SYNC_CFPEEK_CTRL); host1x_sync_writel(host, HOST1X_SYNC_CFPEEK_CTRL_ENA_F(1) | HOST1X_SYNC_CFPEEK_CTRL_CHANNR_F(ch->id), HOST1X_SYNC_CFPEEK_CTRL); val = host1x_sync_readl(host, HOST1X_SYNC_CFPEEK_PTRS); rd_ptr = HOST1X_SYNC_CFPEEK_PTRS_CF_RD_PTR_V(val); wr_ptr = HOST1X_SYNC_CFPEEK_PTRS_CF_WR_PTR_V(val); val = host1x_sync_readl(host, HOST1X_SYNC_CF_SETUP(ch->id)); start = HOST1X_SYNC_CF_SETUP_BASE_V(val); end = HOST1X_SYNC_CF_SETUP_LIMIT_V(val); do { host1x_sync_writel(host, 0x0, HOST1X_SYNC_CFPEEK_CTRL); host1x_sync_writel(host, HOST1X_SYNC_CFPEEK_CTRL_ENA_F(1) | HOST1X_SYNC_CFPEEK_CTRL_CHANNR_F(ch->id) | HOST1X_SYNC_CFPEEK_CTRL_ADDR_F(rd_ptr), HOST1X_SYNC_CFPEEK_CTRL); val = host1x_sync_readl(host, HOST1X_SYNC_CFPEEK_READ); if (!data_count) { host1x_debug_output(o, "%08x: ", val); data_count = show_channel_command(o, val, NULL); } else { host1x_debug_cont(o, "%08x%s", val, data_count > 1 ? 
", " : "])\n"); data_count--; } if (rd_ptr == end) rd_ptr = start; else rd_ptr++; } while (rd_ptr != wr_ptr); if (data_count) host1x_debug_cont(o, ", ...])\n"); host1x_debug_output(o, "\n"); host1x_sync_writel(host, 0x0, HOST1X_SYNC_CFPEEK_CTRL); } static void host1x_debug_show_mlocks(struct host1x *host, struct output *o) { unsigned int i; host1x_debug_output(o, "---- mlocks ----\n"); for (i = 0; i < host1x_syncpt_nb_mlocks(host); i++) { u32 owner = host1x_sync_readl(host, HOST1X_SYNC_MLOCK_OWNER(i)); if (HOST1X_SYNC_MLOCK_OWNER_CH_OWNS_V(owner)) host1x_debug_output(o, "%u: locked by channel %u\n", i, HOST1X_SYNC_MLOCK_OWNER_CHID_V(owner)); else if (HOST1X_SYNC_MLOCK_OWNER_CPU_OWNS_V(owner)) host1x_debug_output(o, "%u: locked by cpu\n", i); else host1x_debug_output(o, "%u: unlocked\n", i); } host1x_debug_output(o, "\n"); }
linux-master
drivers/gpu/host1x/hw/debug_hw_1x01.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Host1x init for Tegra124 SoCs
 *
 * Copyright (c) 2013 NVIDIA Corporation.
 */

/* include hw specification */
#include "host1x04.h"
#include "host1x04_hardware.h"

/* include code */
#define HOST1X_HW 4

#include "cdma_hw.c"
#include "channel_hw.c"
#include "debug_hw.c"
#include "intr_hw.c"
#include "syncpt_hw.c"

#include "../dev.h"

int host1x04_init(struct host1x *host)
{
        host->channel_op = &host1x_channel_ops;
        host->cdma_op = &host1x_cdma_ops;
        host->cdma_pb_op = &host1x_pushbuffer_ops;
        host->syncpt_op = &host1x_syncpt_ops;
        host->intr_op = &host1x_intr_ops;
        host->debug_op = &host1x_debug_ops;

        return 0;
}
linux-master
drivers/gpu/host1x/hw/host1x04.c
// SPDX-License-Identifier: GPL-2.0-only /* * Industry-pack bus support functions. * * Copyright (C) 2011-2012 CERN (www.cern.ch) * Author: Samuel Iglesias Gonsalvez <[email protected]> */ #include <linux/module.h> #include <linux/slab.h> #include <linux/idr.h> #include <linux/io.h> #include <linux/ipack.h> #define to_ipack_dev(device) container_of(device, struct ipack_device, dev) #define to_ipack_driver(drv) container_of(drv, struct ipack_driver, driver) static DEFINE_IDA(ipack_ida); static void ipack_device_release(struct device *dev) { struct ipack_device *device = to_ipack_dev(dev); kfree(device->id); device->release(device); } static inline const struct ipack_device_id * ipack_match_one_device(const struct ipack_device_id *id, const struct ipack_device *device) { if ((id->format == IPACK_ANY_FORMAT || id->format == device->id_format) && (id->vendor == IPACK_ANY_ID || id->vendor == device->id_vendor) && (id->device == IPACK_ANY_ID || id->device == device->id_device)) return id; return NULL; } static const struct ipack_device_id * ipack_match_id(const struct ipack_device_id *ids, struct ipack_device *idev) { if (ids) { while (ids->vendor || ids->device) { if (ipack_match_one_device(ids, idev)) return ids; ids++; } } return NULL; } static int ipack_bus_match(struct device *dev, struct device_driver *drv) { struct ipack_device *idev = to_ipack_dev(dev); struct ipack_driver *idrv = to_ipack_driver(drv); const struct ipack_device_id *found_id; found_id = ipack_match_id(idrv->id_table, idev); return found_id ? 1 : 0; } static int ipack_bus_probe(struct device *device) { struct ipack_device *dev = to_ipack_dev(device); struct ipack_driver *drv = to_ipack_driver(device->driver); return drv->ops->probe(dev); } static void ipack_bus_remove(struct device *device) { struct ipack_device *dev = to_ipack_dev(device); struct ipack_driver *drv = to_ipack_driver(device->driver); if (drv->ops->remove) drv->ops->remove(dev); } static int ipack_uevent(const struct device *dev, struct kobj_uevent_env *env) { const struct ipack_device *idev; if (!dev) return -ENODEV; idev = to_ipack_dev(dev); if (add_uevent_var(env, "MODALIAS=ipack:f%02Xv%08Xd%08X", idev->id_format, idev->id_vendor, idev->id_device)) return -ENOMEM; return 0; } #define ipack_device_attr(field, format_string) \ static ssize_t \ field##_show(struct device *dev, struct device_attribute *attr, \ char *buf) \ { \ struct ipack_device *idev = to_ipack_dev(dev); \ return sprintf(buf, format_string, idev->field); \ } static ssize_t id_show(struct device *dev, struct device_attribute *attr, char *buf) { unsigned int i, c, l, s; struct ipack_device *idev = to_ipack_dev(dev); switch (idev->id_format) { case IPACK_ID_VERSION_1: l = 0x7; s = 1; break; case IPACK_ID_VERSION_2: l = 0xf; s = 2; break; default: return -EIO; } c = 0; for (i = 0; i < idev->id_avail; i++) { if (i > 0) { if ((i & l) == 0) buf[c++] = '\n'; else if ((i & s) == 0) buf[c++] = ' '; } sprintf(&buf[c], "%02x", idev->id[i]); c += 2; } buf[c++] = '\n'; return c; } static ssize_t id_vendor_show(struct device *dev, struct device_attribute *attr, char *buf) { struct ipack_device *idev = to_ipack_dev(dev); switch (idev->id_format) { case IPACK_ID_VERSION_1: return sprintf(buf, "0x%02x\n", idev->id_vendor); case IPACK_ID_VERSION_2: return sprintf(buf, "0x%06x\n", idev->id_vendor); default: return -EIO; } } static ssize_t id_device_show(struct device *dev, struct device_attribute *attr, char *buf) { struct ipack_device *idev = to_ipack_dev(dev); switch (idev->id_format) { case 
IPACK_ID_VERSION_1: return sprintf(buf, "0x%02x\n", idev->id_device); case IPACK_ID_VERSION_2: return sprintf(buf, "0x%04x\n", idev->id_device); default: return -EIO; } } static ssize_t modalias_show(struct device *dev, struct device_attribute *attr, char *buf) { struct ipack_device *idev = to_ipack_dev(dev); return sprintf(buf, "ipac:f%02Xv%08Xd%08X", idev->id_format, idev->id_vendor, idev->id_device); } ipack_device_attr(id_format, "0x%hhx\n"); static DEVICE_ATTR_RO(id); static DEVICE_ATTR_RO(id_device); static DEVICE_ATTR_RO(id_format); static DEVICE_ATTR_RO(id_vendor); static DEVICE_ATTR_RO(modalias); static struct attribute *ipack_attrs[] = { &dev_attr_id.attr, &dev_attr_id_device.attr, &dev_attr_id_format.attr, &dev_attr_id_vendor.attr, &dev_attr_modalias.attr, NULL, }; ATTRIBUTE_GROUPS(ipack); static struct bus_type ipack_bus_type = { .name = "ipack", .probe = ipack_bus_probe, .match = ipack_bus_match, .remove = ipack_bus_remove, .dev_groups = ipack_groups, .uevent = ipack_uevent, }; struct ipack_bus_device *ipack_bus_register(struct device *parent, int slots, const struct ipack_bus_ops *ops, struct module *owner) { int bus_nr; struct ipack_bus_device *bus; bus = kzalloc(sizeof(*bus), GFP_KERNEL); if (!bus) return NULL; bus_nr = ida_simple_get(&ipack_ida, 0, 0, GFP_KERNEL); if (bus_nr < 0) { kfree(bus); return NULL; } bus->bus_nr = bus_nr; bus->parent = parent; bus->slots = slots; bus->ops = ops; bus->owner = owner; return bus; } EXPORT_SYMBOL_GPL(ipack_bus_register); static int ipack_unregister_bus_member(struct device *dev, void *data) { struct ipack_device *idev = to_ipack_dev(dev); struct ipack_bus_device *bus = data; if (idev->bus == bus) ipack_device_del(idev); return 1; } int ipack_bus_unregister(struct ipack_bus_device *bus) { bus_for_each_dev(&ipack_bus_type, NULL, bus, ipack_unregister_bus_member); ida_simple_remove(&ipack_ida, bus->bus_nr); kfree(bus); return 0; } EXPORT_SYMBOL_GPL(ipack_bus_unregister); int ipack_driver_register(struct ipack_driver *edrv, struct module *owner, const char *name) { if (!edrv->ops->probe) return -EINVAL; edrv->driver.owner = owner; edrv->driver.name = name; edrv->driver.bus = &ipack_bus_type; return driver_register(&edrv->driver); } EXPORT_SYMBOL_GPL(ipack_driver_register); void ipack_driver_unregister(struct ipack_driver *edrv) { driver_unregister(&edrv->driver); } EXPORT_SYMBOL_GPL(ipack_driver_unregister); static u16 ipack_crc_byte(u16 crc, u8 c) { int i; crc ^= c << 8; for (i = 0; i < 8; i++) crc = (crc << 1) ^ ((crc & 0x8000) ? 0x1021 : 0); return crc; } /* * The algorithm in lib/crc-ccitt.c does not seem to apply since it uses the * opposite bit ordering. */ static u8 ipack_calc_crc1(struct ipack_device *dev) { u8 c; u16 crc; unsigned int i; crc = 0xffff; for (i = 0; i < dev->id_avail; i++) { c = (i != 11) ? dev->id[i] : 0; crc = ipack_crc_byte(crc, c); } crc = ~crc; return crc & 0xff; } static u16 ipack_calc_crc2(struct ipack_device *dev) { u8 c; u16 crc; unsigned int i; crc = 0xffff; for (i = 0; i < dev->id_avail; i++) { c = ((i != 0x18) && (i != 0x19)) ? 
dev->id[i] : 0; crc = ipack_crc_byte(crc, c); } crc = ~crc; return crc; } static void ipack_parse_id1(struct ipack_device *dev) { u8 *id = dev->id; u8 crc; dev->id_vendor = id[4]; dev->id_device = id[5]; dev->speed_8mhz = 1; dev->speed_32mhz = (id[7] == 'H'); crc = ipack_calc_crc1(dev); dev->id_crc_correct = (crc == id[11]); if (!dev->id_crc_correct) { dev_warn(&dev->dev, "ID CRC invalid found 0x%x, expected 0x%x.\n", id[11], crc); } } static void ipack_parse_id2(struct ipack_device *dev) { __be16 *id = (__be16 *) dev->id; u16 flags, crc; dev->id_vendor = ((be16_to_cpu(id[3]) & 0xff) << 16) + be16_to_cpu(id[4]); dev->id_device = be16_to_cpu(id[5]); flags = be16_to_cpu(id[10]); dev->speed_8mhz = !!(flags & 2); dev->speed_32mhz = !!(flags & 4); crc = ipack_calc_crc2(dev); dev->id_crc_correct = (crc == be16_to_cpu(id[12])); if (!dev->id_crc_correct) { dev_warn(&dev->dev, "ID CRC invalid found 0x%x, expected 0x%x.\n", id[11], crc); } } static int ipack_device_read_id(struct ipack_device *dev) { u8 __iomem *idmem; int i; int ret = 0; idmem = ioremap(dev->region[IPACK_ID_SPACE].start, dev->region[IPACK_ID_SPACE].size); if (!idmem) { dev_err(&dev->dev, "error mapping memory\n"); return -ENOMEM; } /* Determine ID PROM Data Format. If we find the ids "IPAC" or "IPAH" * we are dealing with a IndustryPack format 1 device. If we detect * "VITA4 " (16 bit big endian formatted) we are dealing with a * IndustryPack format 2 device */ if ((ioread8(idmem + 1) == 'I') && (ioread8(idmem + 3) == 'P') && (ioread8(idmem + 5) == 'A') && ((ioread8(idmem + 7) == 'C') || (ioread8(idmem + 7) == 'H'))) { dev->id_format = IPACK_ID_VERSION_1; dev->id_avail = ioread8(idmem + 0x15); if ((dev->id_avail < 0x0c) || (dev->id_avail > 0x40)) { dev_warn(&dev->dev, "invalid id size"); dev->id_avail = 0x0c; } } else if ((ioread8(idmem + 0) == 'I') && (ioread8(idmem + 1) == 'V') && (ioread8(idmem + 2) == 'A') && (ioread8(idmem + 3) == 'T') && (ioread8(idmem + 4) == ' ') && (ioread8(idmem + 5) == '4')) { dev->id_format = IPACK_ID_VERSION_2; dev->id_avail = ioread16be(idmem + 0x16); if ((dev->id_avail < 0x1a) || (dev->id_avail > 0x40)) { dev_warn(&dev->dev, "invalid id size"); dev->id_avail = 0x1a; } } else { dev->id_format = IPACK_ID_VERSION_INVALID; dev->id_avail = 0; } if (!dev->id_avail) { ret = -ENODEV; goto out; } /* Obtain the amount of memory required to store a copy of the complete * ID ROM contents */ dev->id = kmalloc(dev->id_avail, GFP_KERNEL); if (!dev->id) { ret = -ENOMEM; goto out; } for (i = 0; i < dev->id_avail; i++) { if (dev->id_format == IPACK_ID_VERSION_1) dev->id[i] = ioread8(idmem + (i << 1) + 1); else dev->id[i] = ioread8(idmem + i); } /* now we can finally work with the copy */ switch (dev->id_format) { case IPACK_ID_VERSION_1: ipack_parse_id1(dev); break; case IPACK_ID_VERSION_2: ipack_parse_id2(dev); break; } out: iounmap(idmem); return ret; } int ipack_device_init(struct ipack_device *dev) { int ret; dev->dev.bus = &ipack_bus_type; dev->dev.release = ipack_device_release; dev->dev.parent = dev->bus->parent; ret = dev_set_name(&dev->dev, "ipack-dev.%u.%u", dev->bus->bus_nr, dev->slot); if (ret) return ret; device_initialize(&dev->dev); if (dev->bus->ops->set_clockrate(dev, 8)) dev_warn(&dev->dev, "failed to switch to 8 MHz operation for reading of device ID.\n"); if (dev->bus->ops->reset_timeout(dev)) dev_warn(&dev->dev, "failed to reset potential timeout."); ret = ipack_device_read_id(dev); if (ret < 0) { dev_err(&dev->dev, "error reading device id section.\n"); return ret; } /* if the device supports 32 
MHz operation, use it. */ if (dev->speed_32mhz) { ret = dev->bus->ops->set_clockrate(dev, 32); if (ret < 0) dev_err(&dev->dev, "failed to switch to 32 MHz operation.\n"); } return 0; } EXPORT_SYMBOL_GPL(ipack_device_init); int ipack_device_add(struct ipack_device *dev) { return device_add(&dev->dev); } EXPORT_SYMBOL_GPL(ipack_device_add); void ipack_device_del(struct ipack_device *dev) { device_del(&dev->dev); ipack_put_device(dev); } EXPORT_SYMBOL_GPL(ipack_device_del); void ipack_get_device(struct ipack_device *dev) { get_device(&dev->dev); } EXPORT_SYMBOL_GPL(ipack_get_device); void ipack_put_device(struct ipack_device *dev) { put_device(&dev->dev); } EXPORT_SYMBOL_GPL(ipack_put_device); static int __init ipack_init(void) { ida_init(&ipack_ida); return bus_register(&ipack_bus_type); } static void __exit ipack_exit(void) { bus_unregister(&ipack_bus_type); ida_destroy(&ipack_ida); } module_init(ipack_init); module_exit(ipack_exit); MODULE_AUTHOR("Samuel Iglesias Gonsalvez <[email protected]>"); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("Industry-pack bus core");
linux-master
drivers/ipack/ipack.c
// SPDX-License-Identifier: GPL-2.0-only /* * driver for the TEWS TPCI-200 device * * Copyright (C) 2009-2012 CERN (www.cern.ch) * Author: Nicolas Serafini, EIC2 SA * Author: Samuel Iglesias Gonsalvez <[email protected]> */ #include <linux/module.h> #include <linux/slab.h> #include "tpci200.h" static const u16 tpci200_status_timeout[] = { TPCI200_A_TIMEOUT, TPCI200_B_TIMEOUT, TPCI200_C_TIMEOUT, TPCI200_D_TIMEOUT, }; static const u16 tpci200_status_error[] = { TPCI200_A_ERROR, TPCI200_B_ERROR, TPCI200_C_ERROR, TPCI200_D_ERROR, }; static const size_t tpci200_space_size[IPACK_SPACE_COUNT] = { [IPACK_IO_SPACE] = TPCI200_IO_SPACE_SIZE, [IPACK_ID_SPACE] = TPCI200_ID_SPACE_SIZE, [IPACK_INT_SPACE] = TPCI200_INT_SPACE_SIZE, [IPACK_MEM8_SPACE] = TPCI200_MEM8_SPACE_SIZE, [IPACK_MEM16_SPACE] = TPCI200_MEM16_SPACE_SIZE, }; static const size_t tpci200_space_interval[IPACK_SPACE_COUNT] = { [IPACK_IO_SPACE] = TPCI200_IO_SPACE_INTERVAL, [IPACK_ID_SPACE] = TPCI200_ID_SPACE_INTERVAL, [IPACK_INT_SPACE] = TPCI200_INT_SPACE_INTERVAL, [IPACK_MEM8_SPACE] = TPCI200_MEM8_SPACE_INTERVAL, [IPACK_MEM16_SPACE] = TPCI200_MEM16_SPACE_INTERVAL, }; static struct tpci200_board *check_slot(struct ipack_device *dev) { struct tpci200_board *tpci200; if (dev == NULL) return NULL; tpci200 = dev_get_drvdata(dev->bus->parent); if (tpci200 == NULL) { dev_info(&dev->dev, "carrier board not found\n"); return NULL; } if (dev->slot >= TPCI200_NB_SLOT) { dev_info(&dev->dev, "Slot [%d:%d] doesn't exist! Last tpci200 slot is %d.\n", dev->bus->bus_nr, dev->slot, TPCI200_NB_SLOT-1); return NULL; } return tpci200; } static void tpci200_clear_mask(struct tpci200_board *tpci200, __le16 __iomem *addr, u16 mask) { unsigned long flags; spin_lock_irqsave(&tpci200->regs_lock, flags); iowrite16(ioread16(addr) & (~mask), addr); spin_unlock_irqrestore(&tpci200->regs_lock, flags); } static void tpci200_set_mask(struct tpci200_board *tpci200, __le16 __iomem *addr, u16 mask) { unsigned long flags; spin_lock_irqsave(&tpci200->regs_lock, flags); iowrite16(ioread16(addr) | mask, addr); spin_unlock_irqrestore(&tpci200->regs_lock, flags); } static void tpci200_unregister(struct tpci200_board *tpci200) { free_irq(tpci200->info->pdev->irq, (void *) tpci200); pci_iounmap(tpci200->info->pdev, tpci200->info->interface_regs); pci_release_region(tpci200->info->pdev, TPCI200_IP_INTERFACE_BAR); pci_release_region(tpci200->info->pdev, TPCI200_IO_ID_INT_SPACES_BAR); pci_release_region(tpci200->info->pdev, TPCI200_MEM16_SPACE_BAR); pci_release_region(tpci200->info->pdev, TPCI200_MEM8_SPACE_BAR); pci_disable_device(tpci200->info->pdev); } static void tpci200_enable_irq(struct tpci200_board *tpci200, int islot) { tpci200_set_mask(tpci200, &tpci200->info->interface_regs->control[islot], TPCI200_INT0_EN | TPCI200_INT1_EN); } static void tpci200_disable_irq(struct tpci200_board *tpci200, int islot) { tpci200_clear_mask(tpci200, &tpci200->info->interface_regs->control[islot], TPCI200_INT0_EN | TPCI200_INT1_EN); } static irqreturn_t tpci200_slot_irq(struct slot_irq *slot_irq) { irqreturn_t ret; if (!slot_irq) return -ENODEV; ret = slot_irq->handler(slot_irq->arg); return ret; } static irqreturn_t tpci200_interrupt(int irq, void *dev_id) { struct tpci200_board *tpci200 = (struct tpci200_board *) dev_id; struct slot_irq *slot_irq; irqreturn_t ret; u16 status_reg; int i; /* Read status register */ status_reg = ioread16(&tpci200->info->interface_regs->status); /* Did we cause the interrupt? 
*/ if (!(status_reg & TPCI200_SLOT_INT_MASK)) return IRQ_NONE; /* callback to the IRQ handler for the corresponding slot */ rcu_read_lock(); for (i = 0; i < TPCI200_NB_SLOT; i++) { if (!(status_reg & ((TPCI200_A_INT0 | TPCI200_A_INT1) << (2 * i)))) continue; slot_irq = rcu_dereference(tpci200->slots[i].irq); ret = tpci200_slot_irq(slot_irq); if (ret == -ENODEV) { dev_info(&tpci200->info->pdev->dev, "No registered ISR for slot [%d:%d]!. IRQ will be disabled.\n", tpci200->number, i); tpci200_disable_irq(tpci200, i); } } rcu_read_unlock(); return IRQ_HANDLED; } static int tpci200_free_irq(struct ipack_device *dev) { struct slot_irq *slot_irq; struct tpci200_board *tpci200; tpci200 = check_slot(dev); if (tpci200 == NULL) return -EINVAL; if (mutex_lock_interruptible(&tpci200->mutex)) return -ERESTARTSYS; if (tpci200->slots[dev->slot].irq == NULL) { mutex_unlock(&tpci200->mutex); return -EINVAL; } tpci200_disable_irq(tpci200, dev->slot); slot_irq = tpci200->slots[dev->slot].irq; /* uninstall handler */ RCU_INIT_POINTER(tpci200->slots[dev->slot].irq, NULL); synchronize_rcu(); kfree(slot_irq); mutex_unlock(&tpci200->mutex); return 0; } static int tpci200_request_irq(struct ipack_device *dev, irqreturn_t (*handler)(void *), void *arg) { int res = 0; struct slot_irq *slot_irq; struct tpci200_board *tpci200; tpci200 = check_slot(dev); if (tpci200 == NULL) return -EINVAL; if (mutex_lock_interruptible(&tpci200->mutex)) return -ERESTARTSYS; if (tpci200->slots[dev->slot].irq != NULL) { dev_err(&dev->dev, "Slot [%d:%d] IRQ already registered !\n", dev->bus->bus_nr, dev->slot); res = -EINVAL; goto out_unlock; } slot_irq = kzalloc(sizeof(struct slot_irq), GFP_KERNEL); if (slot_irq == NULL) { dev_err(&dev->dev, "Slot [%d:%d] unable to allocate memory for IRQ !\n", dev->bus->bus_nr, dev->slot); res = -ENOMEM; goto out_unlock; } /* * WARNING: Setup Interrupt Vector in the IndustryPack device * before an IRQ request. * Read the User Manual of your IndustryPack device to know * where to write the vector in memory. 
*/ slot_irq->handler = handler; slot_irq->arg = arg; slot_irq->holder = dev; rcu_assign_pointer(tpci200->slots[dev->slot].irq, slot_irq); tpci200_enable_irq(tpci200, dev->slot); out_unlock: mutex_unlock(&tpci200->mutex); return res; } static int tpci200_register(struct tpci200_board *tpci200) { int i; int res; phys_addr_t ioidint_base; unsigned short slot_ctrl; if (pci_enable_device(tpci200->info->pdev) < 0) return -ENODEV; /* Request IP interface register (Bar 2) */ res = pci_request_region(tpci200->info->pdev, TPCI200_IP_INTERFACE_BAR, "Carrier IP interface registers"); if (res) { dev_err(&tpci200->info->pdev->dev, "(bn 0x%X, sn 0x%X) failed to allocate PCI resource for BAR 2 !", tpci200->info->pdev->bus->number, tpci200->info->pdev->devfn); goto err_disable_device; } /* Request IO ID INT space (Bar 3) */ res = pci_request_region(tpci200->info->pdev, TPCI200_IO_ID_INT_SPACES_BAR, "Carrier IO ID INT space"); if (res) { dev_err(&tpci200->info->pdev->dev, "(bn 0x%X, sn 0x%X) failed to allocate PCI resource for BAR 3 !", tpci200->info->pdev->bus->number, tpci200->info->pdev->devfn); goto err_ip_interface_bar; } /* Request MEM8 space (Bar 5) */ res = pci_request_region(tpci200->info->pdev, TPCI200_MEM8_SPACE_BAR, "Carrier MEM8 space"); if (res) { dev_err(&tpci200->info->pdev->dev, "(bn 0x%X, sn 0x%X) failed to allocate PCI resource for BAR 5!", tpci200->info->pdev->bus->number, tpci200->info->pdev->devfn); goto err_io_id_int_spaces_bar; } /* Request MEM16 space (Bar 4) */ res = pci_request_region(tpci200->info->pdev, TPCI200_MEM16_SPACE_BAR, "Carrier MEM16 space"); if (res) { dev_err(&tpci200->info->pdev->dev, "(bn 0x%X, sn 0x%X) failed to allocate PCI resource for BAR 4!", tpci200->info->pdev->bus->number, tpci200->info->pdev->devfn); goto err_mem8_space_bar; } /* Map internal tpci200 driver user space */ tpci200->info->interface_regs = ioremap(pci_resource_start(tpci200->info->pdev, TPCI200_IP_INTERFACE_BAR), TPCI200_IFACE_SIZE); if (!tpci200->info->interface_regs) { dev_err(&tpci200->info->pdev->dev, "(bn 0x%X, sn 0x%X) failed to map driver user space!", tpci200->info->pdev->bus->number, tpci200->info->pdev->devfn); res = -ENOMEM; goto err_mem16_space_bar; } /* Initialize lock that protects interface_regs */ spin_lock_init(&tpci200->regs_lock); ioidint_base = pci_resource_start(tpci200->info->pdev, TPCI200_IO_ID_INT_SPACES_BAR); tpci200->mod_mem[IPACK_IO_SPACE] = ioidint_base + TPCI200_IO_SPACE_OFF; tpci200->mod_mem[IPACK_ID_SPACE] = ioidint_base + TPCI200_ID_SPACE_OFF; tpci200->mod_mem[IPACK_INT_SPACE] = ioidint_base + TPCI200_INT_SPACE_OFF; tpci200->mod_mem[IPACK_MEM8_SPACE] = pci_resource_start(tpci200->info->pdev, TPCI200_MEM8_SPACE_BAR); tpci200->mod_mem[IPACK_MEM16_SPACE] = pci_resource_start(tpci200->info->pdev, TPCI200_MEM16_SPACE_BAR); /* Set the default parameters of the slot * INT0 disabled, level sensitive * INT1 disabled, level sensitive * error interrupt disabled * timeout interrupt disabled * recover time disabled * clock rate 8 MHz */ slot_ctrl = 0; for (i = 0; i < TPCI200_NB_SLOT; i++) writew(slot_ctrl, &tpci200->info->interface_regs->control[i]); res = request_irq(tpci200->info->pdev->irq, tpci200_interrupt, IRQF_SHARED, KBUILD_MODNAME, (void *) tpci200); if (res) { dev_err(&tpci200->info->pdev->dev, "(bn 0x%X, sn 0x%X) unable to register IRQ !", tpci200->info->pdev->bus->number, tpci200->info->pdev->devfn); goto err_interface_regs; } return 0; err_interface_regs: pci_iounmap(tpci200->info->pdev, tpci200->info->interface_regs); err_mem16_space_bar: 
pci_release_region(tpci200->info->pdev, TPCI200_MEM16_SPACE_BAR); err_mem8_space_bar: pci_release_region(tpci200->info->pdev, TPCI200_MEM8_SPACE_BAR); err_io_id_int_spaces_bar: pci_release_region(tpci200->info->pdev, TPCI200_IO_ID_INT_SPACES_BAR); err_ip_interface_bar: pci_release_region(tpci200->info->pdev, TPCI200_IP_INTERFACE_BAR); err_disable_device: pci_disable_device(tpci200->info->pdev); return res; } static int tpci200_get_clockrate(struct ipack_device *dev) { struct tpci200_board *tpci200 = check_slot(dev); __le16 __iomem *addr; if (!tpci200) return -ENODEV; addr = &tpci200->info->interface_regs->control[dev->slot]; return (ioread16(addr) & TPCI200_CLK32) ? 32 : 8; } static int tpci200_set_clockrate(struct ipack_device *dev, int mherz) { struct tpci200_board *tpci200 = check_slot(dev); __le16 __iomem *addr; if (!tpci200) return -ENODEV; addr = &tpci200->info->interface_regs->control[dev->slot]; switch (mherz) { case 8: tpci200_clear_mask(tpci200, addr, TPCI200_CLK32); break; case 32: tpci200_set_mask(tpci200, addr, TPCI200_CLK32); break; default: return -EINVAL; } return 0; } static int tpci200_get_error(struct ipack_device *dev) { struct tpci200_board *tpci200 = check_slot(dev); __le16 __iomem *addr; u16 mask; if (!tpci200) return -ENODEV; addr = &tpci200->info->interface_regs->status; mask = tpci200_status_error[dev->slot]; return (ioread16(addr) & mask) ? 1 : 0; } static int tpci200_get_timeout(struct ipack_device *dev) { struct tpci200_board *tpci200 = check_slot(dev); __le16 __iomem *addr; u16 mask; if (!tpci200) return -ENODEV; addr = &tpci200->info->interface_regs->status; mask = tpci200_status_timeout[dev->slot]; return (ioread16(addr) & mask) ? 1 : 0; } static int tpci200_reset_timeout(struct ipack_device *dev) { struct tpci200_board *tpci200 = check_slot(dev); __le16 __iomem *addr; u16 mask; if (!tpci200) return -ENODEV; addr = &tpci200->info->interface_regs->status; mask = tpci200_status_timeout[dev->slot]; iowrite16(mask, addr); return 0; } static void tpci200_uninstall(struct tpci200_board *tpci200) { tpci200_unregister(tpci200); kfree(tpci200->slots); } static const struct ipack_bus_ops tpci200_bus_ops = { .request_irq = tpci200_request_irq, .free_irq = tpci200_free_irq, .get_clockrate = tpci200_get_clockrate, .set_clockrate = tpci200_set_clockrate, .get_error = tpci200_get_error, .get_timeout = tpci200_get_timeout, .reset_timeout = tpci200_reset_timeout, }; static int tpci200_install(struct tpci200_board *tpci200) { int res; tpci200->slots = kcalloc(TPCI200_NB_SLOT, sizeof(struct tpci200_slot), GFP_KERNEL); if (tpci200->slots == NULL) return -ENOMEM; res = tpci200_register(tpci200); if (res) { kfree(tpci200->slots); tpci200->slots = NULL; return res; } mutex_init(&tpci200->mutex); return 0; } static void tpci200_release_device(struct ipack_device *dev) { kfree(dev); } static int tpci200_create_device(struct tpci200_board *tpci200, int i) { int ret; enum ipack_space space; struct ipack_device *dev = kzalloc(sizeof(struct ipack_device), GFP_KERNEL); if (!dev) return -ENOMEM; dev->slot = i; dev->bus = tpci200->info->ipack_bus; dev->release = tpci200_release_device; for (space = 0; space < IPACK_SPACE_COUNT; space++) { dev->region[space].start = tpci200->mod_mem[space] + tpci200_space_interval[space] * i; dev->region[space].size = tpci200_space_size[space]; } ret = ipack_device_init(dev); if (ret < 0) { ipack_put_device(dev); return ret; } ret = ipack_device_add(dev); if (ret < 0) ipack_put_device(dev); return ret; } static int tpci200_pci_probe(struct pci_dev *pdev, 
const struct pci_device_id *id) { int ret, i; struct tpci200_board *tpci200; u32 reg32; tpci200 = kzalloc(sizeof(struct tpci200_board), GFP_KERNEL); if (!tpci200) return -ENOMEM; tpci200->info = kzalloc(sizeof(struct tpci200_infos), GFP_KERNEL); if (!tpci200->info) { ret = -ENOMEM; goto err_tpci200; } pci_dev_get(pdev); /* Obtain a mapping of the carrier's PCI configuration registers */ ret = pci_request_region(pdev, TPCI200_CFG_MEM_BAR, KBUILD_MODNAME " Configuration Memory"); if (ret) { dev_err(&pdev->dev, "Failed to allocate PCI Configuration Memory"); ret = -EBUSY; goto err_tpci200_info; } tpci200->info->cfg_regs = ioremap( pci_resource_start(pdev, TPCI200_CFG_MEM_BAR), pci_resource_len(pdev, TPCI200_CFG_MEM_BAR)); if (!tpci200->info->cfg_regs) { dev_err(&pdev->dev, "Failed to map PCI Configuration Memory"); ret = -EFAULT; goto err_request_region; } /* Disable byte swapping for 16 bit IP module access. This will ensure * that the Industrypack big endian byte order is preserved by the * carrier. */ reg32 = ioread32(tpci200->info->cfg_regs + LAS1_DESC); reg32 |= 1 << LAS_BIT_BIGENDIAN; iowrite32(reg32, tpci200->info->cfg_regs + LAS1_DESC); reg32 = ioread32(tpci200->info->cfg_regs + LAS2_DESC); reg32 |= 1 << LAS_BIT_BIGENDIAN; iowrite32(reg32, tpci200->info->cfg_regs + LAS2_DESC); /* Save struct pci_dev pointer */ tpci200->info->pdev = pdev; tpci200->info->id_table = (struct pci_device_id *)id; /* register the device and initialize it */ ret = tpci200_install(tpci200); if (ret) { dev_err(&pdev->dev, "error during tpci200 install\n"); ret = -ENODEV; goto err_cfg_regs; } /* Register the carrier in the industry pack bus driver */ tpci200->info->ipack_bus = ipack_bus_register(&pdev->dev, TPCI200_NB_SLOT, &tpci200_bus_ops, THIS_MODULE); if (!tpci200->info->ipack_bus) { dev_err(&pdev->dev, "error registering the carrier on ipack driver\n"); ret = -EFAULT; goto err_tpci200_install; } /* save the bus number given by ipack to logging purpose */ tpci200->number = tpci200->info->ipack_bus->bus_nr; dev_set_drvdata(&pdev->dev, tpci200); for (i = 0; i < TPCI200_NB_SLOT; i++) tpci200_create_device(tpci200, i); return 0; err_tpci200_install: tpci200_uninstall(tpci200); err_cfg_regs: pci_iounmap(tpci200->info->pdev, tpci200->info->cfg_regs); err_request_region: pci_release_region(pdev, TPCI200_CFG_MEM_BAR); err_tpci200_info: kfree(tpci200->info); pci_dev_put(pdev); err_tpci200: kfree(tpci200); return ret; } static void __tpci200_pci_remove(struct tpci200_board *tpci200) { ipack_bus_unregister(tpci200->info->ipack_bus); tpci200_uninstall(tpci200); pci_iounmap(tpci200->info->pdev, tpci200->info->cfg_regs); pci_release_region(tpci200->info->pdev, TPCI200_CFG_MEM_BAR); pci_dev_put(tpci200->info->pdev); kfree(tpci200->info); kfree(tpci200); } static void tpci200_pci_remove(struct pci_dev *dev) { struct tpci200_board *tpci200 = pci_get_drvdata(dev); __tpci200_pci_remove(tpci200); } static const struct pci_device_id tpci200_idtable[] = { { TPCI200_VENDOR_ID, TPCI200_DEVICE_ID, TPCI200_SUBVENDOR_ID, TPCI200_SUBDEVICE_ID }, { 0, }, }; MODULE_DEVICE_TABLE(pci, tpci200_idtable); static struct pci_driver tpci200_pci_drv = { .name = "tpci200", .id_table = tpci200_idtable, .probe = tpci200_pci_probe, .remove = tpci200_pci_remove, }; module_pci_driver(tpci200_pci_drv); MODULE_DESCRIPTION("TEWS TPCI-200 device driver"); MODULE_LICENSE("GPL");
linux-master
drivers/ipack/carriers/tpci200.c
// SPDX-License-Identifier: GPL-2.0-only /* * driver for the GE IP-OCTAL boards * * Copyright (C) 2009-2012 CERN (www.cern.ch) * Author: Nicolas Serafini, EIC2 SA * Author: Samuel Iglesias Gonsalvez <[email protected]> */ #include <linux/device.h> #include <linux/module.h> #include <linux/interrupt.h> #include <linux/sched.h> #include <linux/tty.h> #include <linux/serial.h> #include <linux/tty_flip.h> #include <linux/slab.h> #include <linux/io.h> #include <linux/ipack.h> #include "ipoctal.h" #include "scc2698.h" #define IP_OCTAL_ID_SPACE_VECTOR 0x41 #define IP_OCTAL_NB_BLOCKS 4 static const struct tty_operations ipoctal_fops; struct ipoctal_channel { struct ipoctal_stats stats; unsigned int nb_bytes; wait_queue_head_t queue; spinlock_t lock; unsigned int pointer_read; unsigned int pointer_write; struct tty_port tty_port; bool tty_registered; union scc2698_channel __iomem *regs; union scc2698_block __iomem *block_regs; unsigned int board_id; u8 isr_rx_rdy_mask; u8 isr_tx_rdy_mask; unsigned int rx_enable; }; struct ipoctal { struct ipack_device *dev; unsigned int board_id; struct ipoctal_channel channel[NR_CHANNELS]; struct tty_driver *tty_drv; u8 __iomem *mem8_space; u8 __iomem *int_space; }; static inline struct ipoctal *chan_to_ipoctal(struct ipoctal_channel *chan, unsigned int index) { return container_of(chan, struct ipoctal, channel[index]); } static void ipoctal_reset_channel(struct ipoctal_channel *channel) { iowrite8(CR_DISABLE_RX | CR_DISABLE_TX, &channel->regs->w.cr); channel->rx_enable = 0; iowrite8(CR_CMD_RESET_RX, &channel->regs->w.cr); iowrite8(CR_CMD_RESET_TX, &channel->regs->w.cr); iowrite8(CR_CMD_RESET_ERR_STATUS, &channel->regs->w.cr); iowrite8(CR_CMD_RESET_MR, &channel->regs->w.cr); } static int ipoctal_port_activate(struct tty_port *port, struct tty_struct *tty) { struct ipoctal_channel *channel; channel = dev_get_drvdata(tty->dev); /* * Enable RX. 
TX will be enabled when * there is something to send */ iowrite8(CR_ENABLE_RX, &channel->regs->w.cr); channel->rx_enable = 1; return 0; } static int ipoctal_install(struct tty_driver *driver, struct tty_struct *tty) { struct ipoctal_channel *channel = dev_get_drvdata(tty->dev); struct ipoctal *ipoctal = chan_to_ipoctal(channel, tty->index); int res; if (!ipack_get_carrier(ipoctal->dev)) return -EBUSY; res = tty_standard_install(driver, tty); if (res) goto err_put_carrier; tty->driver_data = channel; return 0; err_put_carrier: ipack_put_carrier(ipoctal->dev); return res; } static int ipoctal_open(struct tty_struct *tty, struct file *file) { struct ipoctal_channel *channel = tty->driver_data; return tty_port_open(&channel->tty_port, tty, file); } static void ipoctal_reset_stats(struct ipoctal_stats *stats) { stats->tx = 0; stats->rx = 0; stats->rcv_break = 0; stats->framing_err = 0; stats->overrun_err = 0; stats->parity_err = 0; } static void ipoctal_free_channel(struct ipoctal_channel *channel) { ipoctal_reset_stats(&channel->stats); channel->pointer_read = 0; channel->pointer_write = 0; channel->nb_bytes = 0; } static void ipoctal_close(struct tty_struct *tty, struct file *filp) { struct ipoctal_channel *channel = tty->driver_data; tty_port_close(&channel->tty_port, tty, filp); ipoctal_free_channel(channel); } static int ipoctal_get_icount(struct tty_struct *tty, struct serial_icounter_struct *icount) { struct ipoctal_channel *channel = tty->driver_data; icount->cts = 0; icount->dsr = 0; icount->rng = 0; icount->dcd = 0; icount->rx = channel->stats.rx; icount->tx = channel->stats.tx; icount->frame = channel->stats.framing_err; icount->parity = channel->stats.parity_err; icount->brk = channel->stats.rcv_break; return 0; } static void ipoctal_irq_rx(struct ipoctal_channel *channel, u8 sr) { struct tty_port *port = &channel->tty_port; unsigned char value; unsigned char flag; u8 isr; do { value = ioread8(&channel->regs->r.rhr); flag = TTY_NORMAL; /* Error: count statistics */ if (sr & SR_ERROR) { iowrite8(CR_CMD_RESET_ERR_STATUS, &channel->regs->w.cr); if (sr & SR_OVERRUN_ERROR) { channel->stats.overrun_err++; /* Overrun doesn't affect the current character*/ tty_insert_flip_char(port, 0, TTY_OVERRUN); } if (sr & SR_PARITY_ERROR) { channel->stats.parity_err++; flag = TTY_PARITY; } if (sr & SR_FRAMING_ERROR) { channel->stats.framing_err++; flag = TTY_FRAME; } if (sr & SR_RECEIVED_BREAK) { channel->stats.rcv_break++; flag = TTY_BREAK; } } tty_insert_flip_char(port, value, flag); /* Check if there are more characters in RX FIFO * If there are more, the isr register for this channel * has enabled the RxRDY|FFULL bit. */ isr = ioread8(&channel->block_regs->r.isr); sr = ioread8(&channel->regs->r.sr); } while (isr & channel->isr_rx_rdy_mask); tty_flip_buffer_push(port); } static void ipoctal_irq_tx(struct ipoctal_channel *channel) { unsigned char value; unsigned int *pointer_write = &channel->pointer_write; if (channel->nb_bytes == 0) return; spin_lock(&channel->lock); value = channel->tty_port.xmit_buf[*pointer_write]; iowrite8(value, &channel->regs->w.thr); channel->stats.tx++; (*pointer_write)++; *pointer_write = *pointer_write % PAGE_SIZE; channel->nb_bytes--; spin_unlock(&channel->lock); } static void ipoctal_irq_channel(struct ipoctal_channel *channel) { u8 isr, sr; /* The HW is organized in pair of channels. 
See which register we need * to read from */ isr = ioread8(&channel->block_regs->r.isr); sr = ioread8(&channel->regs->r.sr); if (isr & (IMR_DELTA_BREAK_A | IMR_DELTA_BREAK_B)) iowrite8(CR_CMD_RESET_BREAK_CHANGE, &channel->regs->w.cr); if ((sr & SR_TX_EMPTY) && (channel->nb_bytes == 0)) { iowrite8(CR_DISABLE_TX, &channel->regs->w.cr); /* In case of RS-485, change from TX to RX when finishing TX. * Half-duplex. */ if (channel->board_id == IPACK1_DEVICE_ID_SBS_OCTAL_485) { iowrite8(CR_CMD_NEGATE_RTSN, &channel->regs->w.cr); iowrite8(CR_ENABLE_RX, &channel->regs->w.cr); channel->rx_enable = 1; } } /* RX data */ if ((isr & channel->isr_rx_rdy_mask) && (sr & SR_RX_READY)) ipoctal_irq_rx(channel, sr); /* TX of each character */ if ((isr & channel->isr_tx_rdy_mask) && (sr & SR_TX_READY)) ipoctal_irq_tx(channel); } static irqreturn_t ipoctal_irq_handler(void *arg) { unsigned int i; struct ipoctal *ipoctal = arg; /* Clear the IPack device interrupt */ readw(ipoctal->int_space + ACK_INT_REQ0); readw(ipoctal->int_space + ACK_INT_REQ1); /* Check all channels */ for (i = 0; i < NR_CHANNELS; i++) ipoctal_irq_channel(&ipoctal->channel[i]); return IRQ_HANDLED; } static const struct tty_port_operations ipoctal_tty_port_ops = { .dtr_rts = NULL, .activate = ipoctal_port_activate, }; static int ipoctal_inst_slot(struct ipoctal *ipoctal, unsigned int bus_nr, unsigned int slot) { int res; int i; struct tty_driver *drv; struct ipoctal_channel *channel; struct ipack_region *region; void __iomem *addr; union scc2698_channel __iomem *chan_regs; union scc2698_block __iomem *block_regs; ipoctal->board_id = ipoctal->dev->id_device; region = &ipoctal->dev->region[IPACK_IO_SPACE]; addr = devm_ioremap(&ipoctal->dev->dev, region->start, region->size); if (!addr) { dev_err(&ipoctal->dev->dev, "Unable to map slot [%d:%d] IO space!\n", bus_nr, slot); return -EADDRNOTAVAIL; } /* Save the virtual address to access the registers easily */ chan_regs = (union scc2698_channel __iomem *) addr; block_regs = (union scc2698_block __iomem *) addr; region = &ipoctal->dev->region[IPACK_INT_SPACE]; ipoctal->int_space = devm_ioremap(&ipoctal->dev->dev, region->start, region->size); if (!ipoctal->int_space) { dev_err(&ipoctal->dev->dev, "Unable to map slot [%d:%d] INT space!\n", bus_nr, slot); return -EADDRNOTAVAIL; } region = &ipoctal->dev->region[IPACK_MEM8_SPACE]; ipoctal->mem8_space = devm_ioremap(&ipoctal->dev->dev, region->start, 0x8000); if (!ipoctal->mem8_space) { dev_err(&ipoctal->dev->dev, "Unable to map slot [%d:%d] MEM8 space!\n", bus_nr, slot); return -EADDRNOTAVAIL; } /* Disable RX and TX before touching anything */ for (i = 0; i < NR_CHANNELS ; i++) { struct ipoctal_channel *channel = &ipoctal->channel[i]; channel->regs = chan_regs + i; channel->block_regs = block_regs + (i >> 1); channel->board_id = ipoctal->board_id; if (i & 1) { channel->isr_tx_rdy_mask = ISR_TxRDY_B; channel->isr_rx_rdy_mask = ISR_RxRDY_FFULL_B; } else { channel->isr_tx_rdy_mask = ISR_TxRDY_A; channel->isr_rx_rdy_mask = ISR_RxRDY_FFULL_A; } ipoctal_reset_channel(channel); iowrite8(MR1_CHRL_8_BITS | MR1_ERROR_CHAR | MR1_RxINT_RxRDY, &channel->regs->w.mr); /* mr1 */ iowrite8(0, &channel->regs->w.mr); /* mr2 */ iowrite8(TX_CLK_9600 | RX_CLK_9600, &channel->regs->w.csr); } for (i = 0; i < IP_OCTAL_NB_BLOCKS; i++) { iowrite8(ACR_BRG_SET2, &block_regs[i].w.acr); iowrite8(OPCR_MPP_OUTPUT | OPCR_MPOa_RTSN | OPCR_MPOb_RTSN, &block_regs[i].w.opcr); iowrite8(IMR_TxRDY_A | IMR_RxRDY_FFULL_A | IMR_DELTA_BREAK_A | IMR_TxRDY_B | IMR_RxRDY_FFULL_B | IMR_DELTA_BREAK_B, 
&block_regs[i].w.imr); } /* Dummy write */ iowrite8(1, ipoctal->mem8_space + 1); /* Register the TTY device */ /* Each IP-OCTAL channel is a TTY port */ drv = tty_alloc_driver(NR_CHANNELS, TTY_DRIVER_REAL_RAW | TTY_DRIVER_DYNAMIC_DEV); if (IS_ERR(drv)) return PTR_ERR(drv); /* Fill struct tty_driver with ipoctal data */ drv->owner = THIS_MODULE; drv->driver_name = KBUILD_MODNAME; drv->name = kasprintf(GFP_KERNEL, KBUILD_MODNAME ".%d.%d.", bus_nr, slot); if (!drv->name) { res = -ENOMEM; goto err_put_driver; } drv->major = 0; drv->minor_start = 0; drv->type = TTY_DRIVER_TYPE_SERIAL; drv->subtype = SERIAL_TYPE_NORMAL; drv->init_termios = tty_std_termios; drv->init_termios.c_cflag = B9600 | CS8 | CREAD | HUPCL | CLOCAL; drv->init_termios.c_ispeed = 9600; drv->init_termios.c_ospeed = 9600; tty_set_operations(drv, &ipoctal_fops); res = tty_register_driver(drv); if (res) { dev_err(&ipoctal->dev->dev, "Can't register tty driver.\n"); goto err_free_name; } /* Save struct tty_driver for use it when uninstalling the device */ ipoctal->tty_drv = drv; for (i = 0; i < NR_CHANNELS; i++) { struct device *tty_dev; channel = &ipoctal->channel[i]; tty_port_init(&channel->tty_port); res = tty_port_alloc_xmit_buf(&channel->tty_port); if (res) continue; channel->tty_port.ops = &ipoctal_tty_port_ops; ipoctal_reset_stats(&channel->stats); channel->nb_bytes = 0; spin_lock_init(&channel->lock); channel->pointer_read = 0; channel->pointer_write = 0; tty_dev = tty_port_register_device_attr(&channel->tty_port, drv, i, NULL, channel, NULL); if (IS_ERR(tty_dev)) { dev_err(&ipoctal->dev->dev, "Failed to register tty device.\n"); tty_port_free_xmit_buf(&channel->tty_port); tty_port_destroy(&channel->tty_port); continue; } channel->tty_registered = true; } /* * IP-OCTAL has different addresses to copy its IRQ vector. * Depending of the carrier these addresses are accesible or not. * More info in the datasheet. 
*/ ipoctal->dev->bus->ops->request_irq(ipoctal->dev, ipoctal_irq_handler, ipoctal); return 0; err_free_name: kfree(drv->name); err_put_driver: tty_driver_kref_put(drv); return res; } static inline int ipoctal_copy_write_buffer(struct ipoctal_channel *channel, const u8 *buf, int count) { unsigned long flags; int i; unsigned int *pointer_read = &channel->pointer_read; /* Copy the bytes from the user buffer to the internal one */ for (i = 0; i < count; i++) { if (i <= (PAGE_SIZE - channel->nb_bytes)) { spin_lock_irqsave(&channel->lock, flags); channel->tty_port.xmit_buf[*pointer_read] = buf[i]; *pointer_read = (*pointer_read + 1) % PAGE_SIZE; channel->nb_bytes++; spin_unlock_irqrestore(&channel->lock, flags); } else { break; } } return i; } static ssize_t ipoctal_write_tty(struct tty_struct *tty, const u8 *buf, size_t count) { struct ipoctal_channel *channel = tty->driver_data; unsigned int char_copied; char_copied = ipoctal_copy_write_buffer(channel, buf, count); /* As the IP-OCTAL 485 only supports half duplex, do it manually */ if (channel->board_id == IPACK1_DEVICE_ID_SBS_OCTAL_485) { iowrite8(CR_DISABLE_RX, &channel->regs->w.cr); channel->rx_enable = 0; iowrite8(CR_CMD_ASSERT_RTSN, &channel->regs->w.cr); } /* * Send a packet and then disable TX to avoid failure after several send * operations */ iowrite8(CR_ENABLE_TX, &channel->regs->w.cr); return char_copied; } static unsigned int ipoctal_write_room(struct tty_struct *tty) { struct ipoctal_channel *channel = tty->driver_data; return PAGE_SIZE - channel->nb_bytes; } static unsigned int ipoctal_chars_in_buffer(struct tty_struct *tty) { struct ipoctal_channel *channel = tty->driver_data; return channel->nb_bytes; } static void ipoctal_set_termios(struct tty_struct *tty, const struct ktermios *old_termios) { unsigned int cflag; unsigned char mr1 = 0; unsigned char mr2 = 0; unsigned char csr = 0; struct ipoctal_channel *channel = tty->driver_data; speed_t baud; cflag = tty->termios.c_cflag; /* Disable and reset everything before change the setup */ ipoctal_reset_channel(channel); /* Set Bits per chars */ switch (cflag & CSIZE) { case CS6: mr1 |= MR1_CHRL_6_BITS; break; case CS7: mr1 |= MR1_CHRL_7_BITS; break; case CS8: default: mr1 |= MR1_CHRL_8_BITS; /* By default, select CS8 */ tty->termios.c_cflag = (cflag & ~CSIZE) | CS8; break; } /* Set Parity */ if (cflag & PARENB) if (cflag & PARODD) mr1 |= MR1_PARITY_ON | MR1_PARITY_ODD; else mr1 |= MR1_PARITY_ON | MR1_PARITY_EVEN; else mr1 |= MR1_PARITY_OFF; /* Mark or space parity is not supported */ tty->termios.c_cflag &= ~CMSPAR; /* Set stop bits */ if (cflag & CSTOPB) mr2 |= MR2_STOP_BITS_LENGTH_2; else mr2 |= MR2_STOP_BITS_LENGTH_1; /* Set the flow control */ switch (channel->board_id) { case IPACK1_DEVICE_ID_SBS_OCTAL_232: if (cflag & CRTSCTS) { mr1 |= MR1_RxRTS_CONTROL_ON; mr2 |= MR2_TxRTS_CONTROL_OFF | MR2_CTS_ENABLE_TX_ON; } else { mr1 |= MR1_RxRTS_CONTROL_OFF; mr2 |= MR2_TxRTS_CONTROL_OFF | MR2_CTS_ENABLE_TX_OFF; } break; case IPACK1_DEVICE_ID_SBS_OCTAL_422: mr1 |= MR1_RxRTS_CONTROL_OFF; mr2 |= MR2_TxRTS_CONTROL_OFF | MR2_CTS_ENABLE_TX_OFF; break; case IPACK1_DEVICE_ID_SBS_OCTAL_485: mr1 |= MR1_RxRTS_CONTROL_OFF; mr2 |= MR2_TxRTS_CONTROL_ON | MR2_CTS_ENABLE_TX_OFF; break; default: return; } baud = tty_get_baud_rate(tty); tty_termios_encode_baud_rate(&tty->termios, baud, baud); /* Set baud rate */ switch (baud) { case 75: csr |= TX_CLK_75 | RX_CLK_75; break; case 110: csr |= TX_CLK_110 | RX_CLK_110; break; case 150: csr |= TX_CLK_150 | RX_CLK_150; break; case 300: csr |= TX_CLK_300 | 
RX_CLK_300; break; case 600: csr |= TX_CLK_600 | RX_CLK_600; break; case 1200: csr |= TX_CLK_1200 | RX_CLK_1200; break; case 1800: csr |= TX_CLK_1800 | RX_CLK_1800; break; case 2000: csr |= TX_CLK_2000 | RX_CLK_2000; break; case 2400: csr |= TX_CLK_2400 | RX_CLK_2400; break; case 4800: csr |= TX_CLK_4800 | RX_CLK_4800; break; case 9600: csr |= TX_CLK_9600 | RX_CLK_9600; break; case 19200: csr |= TX_CLK_19200 | RX_CLK_19200; break; case 38400: default: csr |= TX_CLK_38400 | RX_CLK_38400; /* In case of default, we establish 38400 bps */ tty_termios_encode_baud_rate(&tty->termios, 38400, 38400); break; } mr1 |= MR1_ERROR_CHAR; mr1 |= MR1_RxINT_RxRDY; /* Write the control registers */ iowrite8(mr1, &channel->regs->w.mr); iowrite8(mr2, &channel->regs->w.mr); iowrite8(csr, &channel->regs->w.csr); /* Enable again the RX, if it was before */ if (channel->rx_enable) iowrite8(CR_ENABLE_RX, &channel->regs->w.cr); } static void ipoctal_hangup(struct tty_struct *tty) { unsigned long flags; struct ipoctal_channel *channel = tty->driver_data; if (channel == NULL) return; spin_lock_irqsave(&channel->lock, flags); channel->nb_bytes = 0; channel->pointer_read = 0; channel->pointer_write = 0; spin_unlock_irqrestore(&channel->lock, flags); tty_port_hangup(&channel->tty_port); ipoctal_reset_channel(channel); tty_port_set_initialized(&channel->tty_port, false); wake_up_interruptible(&channel->tty_port.open_wait); } static void ipoctal_shutdown(struct tty_struct *tty) { struct ipoctal_channel *channel = tty->driver_data; if (channel == NULL) return; ipoctal_reset_channel(channel); tty_port_set_initialized(&channel->tty_port, false); } static void ipoctal_cleanup(struct tty_struct *tty) { struct ipoctal_channel *channel = tty->driver_data; struct ipoctal *ipoctal = chan_to_ipoctal(channel, tty->index); /* release the carrier driver */ ipack_put_carrier(ipoctal->dev); } static const struct tty_operations ipoctal_fops = { .ioctl = NULL, .install = ipoctal_install, .open = ipoctal_open, .close = ipoctal_close, .write = ipoctal_write_tty, .set_termios = ipoctal_set_termios, .write_room = ipoctal_write_room, .chars_in_buffer = ipoctal_chars_in_buffer, .get_icount = ipoctal_get_icount, .hangup = ipoctal_hangup, .shutdown = ipoctal_shutdown, .cleanup = ipoctal_cleanup, }; static int ipoctal_probe(struct ipack_device *dev) { int res; struct ipoctal *ipoctal; ipoctal = kzalloc(sizeof(struct ipoctal), GFP_KERNEL); if (ipoctal == NULL) return -ENOMEM; ipoctal->dev = dev; res = ipoctal_inst_slot(ipoctal, dev->bus->bus_nr, dev->slot); if (res) goto out_uninst; dev_set_drvdata(&dev->dev, ipoctal); return 0; out_uninst: kfree(ipoctal); return res; } static void __ipoctal_remove(struct ipoctal *ipoctal) { int i; ipoctal->dev->bus->ops->free_irq(ipoctal->dev); for (i = 0; i < NR_CHANNELS; i++) { struct ipoctal_channel *channel = &ipoctal->channel[i]; if (!channel->tty_registered) continue; tty_unregister_device(ipoctal->tty_drv, i); tty_port_free_xmit_buf(&channel->tty_port); tty_port_destroy(&channel->tty_port); } tty_unregister_driver(ipoctal->tty_drv); kfree(ipoctal->tty_drv->name); tty_driver_kref_put(ipoctal->tty_drv); kfree(ipoctal); } static void ipoctal_remove(struct ipack_device *idev) { __ipoctal_remove(dev_get_drvdata(&idev->dev)); } static DEFINE_IPACK_DEVICE_TABLE(ipoctal_ids) = { { IPACK_DEVICE(IPACK_ID_VERSION_1, IPACK1_VENDOR_ID_SBS, IPACK1_DEVICE_ID_SBS_OCTAL_232) }, { IPACK_DEVICE(IPACK_ID_VERSION_1, IPACK1_VENDOR_ID_SBS, IPACK1_DEVICE_ID_SBS_OCTAL_422) }, { IPACK_DEVICE(IPACK_ID_VERSION_1, IPACK1_VENDOR_ID_SBS, 
IPACK1_DEVICE_ID_SBS_OCTAL_485) }, { 0, }, }; MODULE_DEVICE_TABLE(ipack, ipoctal_ids); static const struct ipack_driver_ops ipoctal_drv_ops = { .probe = ipoctal_probe, .remove = ipoctal_remove, }; static struct ipack_driver driver = { .ops = &ipoctal_drv_ops, .id_table = ipoctal_ids, }; static int __init ipoctal_init(void) { return ipack_driver_register(&driver, THIS_MODULE, KBUILD_MODNAME); } static void __exit ipoctal_exit(void) { ipack_driver_unregister(&driver); } MODULE_DESCRIPTION("IP-Octal 232, 422 and 485 device driver"); MODULE_LICENSE("GPL"); module_init(ipoctal_init); module_exit(ipoctal_exit);
linux-master
drivers/ipack/devices/ipoctal.c
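Since each IP-OCTAL channel is exposed as an ordinary TTY port, the driver above can be exercised from user space with plain termios calls. A minimal sketch follows; the device node name is an assumption (the driver builds its name from KBUILD_MODNAME, bus and slot, so something like /dev/ipoctal.0.0.0), and everything else is standard POSIX:

/*
 * Hypothetical userspace sketch: open one IP-OCTAL channel and set it to
 * 9600 8N1, mirroring the driver's init_termios defaults. The node name
 * "/dev/ipoctal.0.0.0" is an assumption for illustration only.
 */
#include <fcntl.h>
#include <stdio.h>
#include <termios.h>
#include <unistd.h>

int main(void)
{
	const char *node = "/dev/ipoctal.0.0.0";	/* assumed node name */
	struct termios tio;
	int fd = open(node, O_RDWR | O_NOCTTY);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	if (tcgetattr(fd, &tio) < 0) {
		perror("tcgetattr");
		close(fd);
		return 1;
	}
	cfmakeraw(&tio);			/* raw mode: no echo, no line editing */
	cfsetispeed(&tio, B9600);		/* matches TX_CLK_9600 | RX_CLK_9600 */
	cfsetospeed(&tio, B9600);
	tio.c_cflag = (tio.c_cflag & ~CSIZE) | CS8 | CREAD | CLOCAL;
	if (tcsetattr(fd, TCSANOW, &tio) < 0) {	/* ends up in ipoctal_set_termios */
		perror("tcsetattr");
		close(fd);
		return 1;
	}
	if (write(fd, "hello\r\n", 7) != 7)	/* queued via ipoctal_write_tty */
		perror("write");
	close(fd);
	return 0;
}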
// SPDX-License-Identifier: GPL-2.0 /* * NVM Express device driver tracepoints * Copyright (c) 2018 Johannes Thumshirn, SUSE Linux GmbH */ #include <asm/unaligned.h> #include "trace.h" static const char *nvme_trace_delete_sq(struct trace_seq *p, u8 *cdw10) { const char *ret = trace_seq_buffer_ptr(p); u16 sqid = get_unaligned_le16(cdw10); trace_seq_printf(p, "sqid=%u", sqid); trace_seq_putc(p, 0); return ret; } static const char *nvme_trace_create_sq(struct trace_seq *p, u8 *cdw10) { const char *ret = trace_seq_buffer_ptr(p); u16 sqid = get_unaligned_le16(cdw10); u16 qsize = get_unaligned_le16(cdw10 + 2); u16 sq_flags = get_unaligned_le16(cdw10 + 4); u16 cqid = get_unaligned_le16(cdw10 + 6); trace_seq_printf(p, "sqid=%u, qsize=%u, sq_flags=0x%x, cqid=%u", sqid, qsize, sq_flags, cqid); trace_seq_putc(p, 0); return ret; } static const char *nvme_trace_delete_cq(struct trace_seq *p, u8 *cdw10) { const char *ret = trace_seq_buffer_ptr(p); u16 cqid = get_unaligned_le16(cdw10); trace_seq_printf(p, "cqid=%u", cqid); trace_seq_putc(p, 0); return ret; } static const char *nvme_trace_create_cq(struct trace_seq *p, u8 *cdw10) { const char *ret = trace_seq_buffer_ptr(p); u16 cqid = get_unaligned_le16(cdw10); u16 qsize = get_unaligned_le16(cdw10 + 2); u16 cq_flags = get_unaligned_le16(cdw10 + 4); u16 irq_vector = get_unaligned_le16(cdw10 + 6); trace_seq_printf(p, "cqid=%u, qsize=%u, cq_flags=0x%x, irq_vector=%u", cqid, qsize, cq_flags, irq_vector); trace_seq_putc(p, 0); return ret; } static const char *nvme_trace_admin_identify(struct trace_seq *p, u8 *cdw10) { const char *ret = trace_seq_buffer_ptr(p); u8 cns = cdw10[0]; u16 ctrlid = get_unaligned_le16(cdw10 + 2); trace_seq_printf(p, "cns=%u, ctrlid=%u", cns, ctrlid); trace_seq_putc(p, 0); return ret; } static const char *nvme_trace_admin_set_features(struct trace_seq *p, u8 *cdw10) { const char *ret = trace_seq_buffer_ptr(p); u8 fid = cdw10[0]; u8 sv = cdw10[3] & 0x8; u32 cdw11 = get_unaligned_le32(cdw10 + 4); trace_seq_printf(p, "fid=0x%x, sv=0x%x, cdw11=0x%x", fid, sv, cdw11); trace_seq_putc(p, 0); return ret; } static const char *nvme_trace_admin_get_features(struct trace_seq *p, u8 *cdw10) { const char *ret = trace_seq_buffer_ptr(p); u8 fid = cdw10[0]; u8 sel = cdw10[1] & 0x7; u32 cdw11 = get_unaligned_le32(cdw10 + 4); trace_seq_printf(p, "fid=0x%x, sel=0x%x, cdw11=0x%x", fid, sel, cdw11); trace_seq_putc(p, 0); return ret; } static const char *nvme_trace_get_lba_status(struct trace_seq *p, u8 *cdw10) { const char *ret = trace_seq_buffer_ptr(p); u64 slba = get_unaligned_le64(cdw10); u32 mndw = get_unaligned_le32(cdw10 + 8); u16 rl = get_unaligned_le16(cdw10 + 12); u8 atype = cdw10[15]; trace_seq_printf(p, "slba=0x%llx, mndw=0x%x, rl=0x%x, atype=%u", slba, mndw, rl, atype); trace_seq_putc(p, 0); return ret; } static const char *nvme_trace_admin_format_nvm(struct trace_seq *p, u8 *cdw10) { const char *ret = trace_seq_buffer_ptr(p); u8 lbaf = cdw10[0] & 0xF; u8 mset = (cdw10[0] >> 4) & 0x1; u8 pi = (cdw10[0] >> 5) & 0x7; u8 pil = cdw10[1] & 0x1; u8 ses = (cdw10[1] >> 1) & 0x7; trace_seq_printf(p, "lbaf=%u, mset=%u, pi=%u, pil=%u, ses=%u", lbaf, mset, pi, pil, ses); trace_seq_putc(p, 0); return ret; } static const char *nvme_trace_read_write(struct trace_seq *p, u8 *cdw10) { const char *ret = trace_seq_buffer_ptr(p); u64 slba = get_unaligned_le64(cdw10); u16 length = get_unaligned_le16(cdw10 + 8); u16 control = get_unaligned_le16(cdw10 + 10); u32 dsmgmt = get_unaligned_le32(cdw10 + 12); u32 reftag = get_unaligned_le32(cdw10 + 16); trace_seq_printf(p, 
"slba=%llu, len=%u, ctrl=0x%x, dsmgmt=%u, reftag=%u", slba, length, control, dsmgmt, reftag); trace_seq_putc(p, 0); return ret; } static const char *nvme_trace_dsm(struct trace_seq *p, u8 *cdw10) { const char *ret = trace_seq_buffer_ptr(p); trace_seq_printf(p, "nr=%u, attributes=%u", get_unaligned_le32(cdw10), get_unaligned_le32(cdw10 + 4)); trace_seq_putc(p, 0); return ret; } static const char *nvme_trace_zone_mgmt_send(struct trace_seq *p, u8 *cdw10) { const char *ret = trace_seq_buffer_ptr(p); u64 slba = get_unaligned_le64(cdw10); u8 zsa = cdw10[12]; u8 all = cdw10[13]; trace_seq_printf(p, "slba=%llu, zsa=%u, all=%u", slba, zsa, all); trace_seq_putc(p, 0); return ret; } static const char *nvme_trace_zone_mgmt_recv(struct trace_seq *p, u8 *cdw10) { const char *ret = trace_seq_buffer_ptr(p); u64 slba = get_unaligned_le64(cdw10); u32 numd = get_unaligned_le32(cdw10 + 8); u8 zra = cdw10[12]; u8 zrasf = cdw10[13]; u8 pr = cdw10[14]; trace_seq_printf(p, "slba=%llu, numd=%u, zra=%u, zrasf=%u, pr=%u", slba, numd, zra, zrasf, pr); trace_seq_putc(p, 0); return ret; } static const char *nvme_trace_common(struct trace_seq *p, u8 *cdw10) { const char *ret = trace_seq_buffer_ptr(p); trace_seq_printf(p, "cdw10=%*ph", 24, cdw10); trace_seq_putc(p, 0); return ret; } const char *nvme_trace_parse_admin_cmd(struct trace_seq *p, u8 opcode, u8 *cdw10) { switch (opcode) { case nvme_admin_delete_sq: return nvme_trace_delete_sq(p, cdw10); case nvme_admin_create_sq: return nvme_trace_create_sq(p, cdw10); case nvme_admin_delete_cq: return nvme_trace_delete_cq(p, cdw10); case nvme_admin_create_cq: return nvme_trace_create_cq(p, cdw10); case nvme_admin_identify: return nvme_trace_admin_identify(p, cdw10); case nvme_admin_set_features: return nvme_trace_admin_set_features(p, cdw10); case nvme_admin_get_features: return nvme_trace_admin_get_features(p, cdw10); case nvme_admin_get_lba_status: return nvme_trace_get_lba_status(p, cdw10); case nvme_admin_format_nvm: return nvme_trace_admin_format_nvm(p, cdw10); default: return nvme_trace_common(p, cdw10); } } const char *nvme_trace_parse_nvm_cmd(struct trace_seq *p, u8 opcode, u8 *cdw10) { switch (opcode) { case nvme_cmd_read: case nvme_cmd_write: case nvme_cmd_write_zeroes: case nvme_cmd_zone_append: return nvme_trace_read_write(p, cdw10); case nvme_cmd_dsm: return nvme_trace_dsm(p, cdw10); case nvme_cmd_zone_mgmt_send: return nvme_trace_zone_mgmt_send(p, cdw10); case nvme_cmd_zone_mgmt_recv: return nvme_trace_zone_mgmt_recv(p, cdw10); default: return nvme_trace_common(p, cdw10); } } static const char *nvme_trace_fabrics_property_set(struct trace_seq *p, u8 *spc) { const char *ret = trace_seq_buffer_ptr(p); u8 attrib = spc[0]; u32 ofst = get_unaligned_le32(spc + 4); u64 value = get_unaligned_le64(spc + 8); trace_seq_printf(p, "attrib=%u, ofst=0x%x, value=0x%llx", attrib, ofst, value); trace_seq_putc(p, 0); return ret; } static const char *nvme_trace_fabrics_connect(struct trace_seq *p, u8 *spc) { const char *ret = trace_seq_buffer_ptr(p); u16 recfmt = get_unaligned_le16(spc); u16 qid = get_unaligned_le16(spc + 2); u16 sqsize = get_unaligned_le16(spc + 4); u8 cattr = spc[6]; u32 kato = get_unaligned_le32(spc + 8); trace_seq_printf(p, "recfmt=%u, qid=%u, sqsize=%u, cattr=%u, kato=%u", recfmt, qid, sqsize, cattr, kato); trace_seq_putc(p, 0); return ret; } static const char *nvme_trace_fabrics_property_get(struct trace_seq *p, u8 *spc) { const char *ret = trace_seq_buffer_ptr(p); u8 attrib = spc[0]; u32 ofst = get_unaligned_le32(spc + 4); trace_seq_printf(p, "attrib=%u, 
ofst=0x%x", attrib, ofst); trace_seq_putc(p, 0); return ret; } static const char *nvme_trace_fabrics_auth_send(struct trace_seq *p, u8 *spc) { const char *ret = trace_seq_buffer_ptr(p); u8 spsp0 = spc[1]; u8 spsp1 = spc[2]; u8 secp = spc[3]; u32 tl = get_unaligned_le32(spc + 4); trace_seq_printf(p, "spsp0=%02x, spsp1=%02x, secp=%02x, tl=%u", spsp0, spsp1, secp, tl); trace_seq_putc(p, 0); return ret; } static const char *nvme_trace_fabrics_auth_receive(struct trace_seq *p, u8 *spc) { const char *ret = trace_seq_buffer_ptr(p); u8 spsp0 = spc[1]; u8 spsp1 = spc[2]; u8 secp = spc[3]; u32 al = get_unaligned_le32(spc + 4); trace_seq_printf(p, "spsp0=%02x, spsp1=%02x, secp=%02x, al=%u", spsp0, spsp1, secp, al); trace_seq_putc(p, 0); return ret; } static const char *nvme_trace_fabrics_common(struct trace_seq *p, u8 *spc) { const char *ret = trace_seq_buffer_ptr(p); trace_seq_printf(p, "specific=%*ph", 24, spc); trace_seq_putc(p, 0); return ret; } const char *nvme_trace_parse_fabrics_cmd(struct trace_seq *p, u8 fctype, u8 *spc) { switch (fctype) { case nvme_fabrics_type_property_set: return nvme_trace_fabrics_property_set(p, spc); case nvme_fabrics_type_connect: return nvme_trace_fabrics_connect(p, spc); case nvme_fabrics_type_property_get: return nvme_trace_fabrics_property_get(p, spc); case nvme_fabrics_type_auth_send: return nvme_trace_fabrics_auth_send(p, spc); case nvme_fabrics_type_auth_receive: return nvme_trace_fabrics_auth_receive(p, spc); default: return nvme_trace_fabrics_common(p, spc); } } const char *nvme_trace_disk_name(struct trace_seq *p, char *name) { const char *ret = trace_seq_buffer_ptr(p); if (*name) trace_seq_printf(p, "disk=%s, ", name); trace_seq_putc(p, 0); return ret; } EXPORT_TRACEPOINT_SYMBOL_GPL(nvme_sq);
linux-master
drivers/nvme/host/trace.c
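The read/write decoder above pulls its fields from fixed little-endian offsets inside the 24 bytes that follow CDW10. A standalone sketch of the same extraction, with hand-rolled stand-ins for the kernel's get_unaligned_le*() helpers and an illustrative payload:

/*
 * Sketch of the field extraction done by nvme_trace_read_write():
 * slba at offset 0 (le64), length at +8 (le16), control at +10 (le16),
 * dsmgmt at +12 (le32), reftag at +16 (le32).
 */
#include <stdint.h>
#include <stdio.h>

static uint16_t get_le16(const uint8_t *p)
{
	return (uint16_t)p[0] | ((uint16_t)p[1] << 8);
}

static uint32_t get_le32(const uint8_t *p)
{
	return get_le16(p) | ((uint32_t)get_le16(p + 2) << 16);
}

static uint64_t get_le64(const uint8_t *p)
{
	return get_le32(p) | ((uint64_t)get_le32(p + 4) << 32);
}

int main(void)
{
	/* Illustrative CDW10..CDW15 payload: slba=0x1000, length field=7, reftag=0xdeadbeef */
	uint8_t cdw10[24] = { 0 };

	cdw10[1] = 0x10;	/* slba = 0x1000, little endian */
	cdw10[8] = 0x07;	/* length field */
	cdw10[16] = 0xef;	/* reftag = 0xdeadbeef */
	cdw10[17] = 0xbe;
	cdw10[18] = 0xad;
	cdw10[19] = 0xde;

	printf("slba=%llu, len=%u, ctrl=0x%x, dsmgmt=%u, reftag=0x%x\n",
	       (unsigned long long)get_le64(cdw10),
	       (unsigned)get_le16(cdw10 + 8),
	       (unsigned)get_le16(cdw10 + 10),
	       (unsigned)get_le32(cdw10 + 12),
	       (unsigned)get_le32(cdw10 + 16));
	return 0;
}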
// SPDX-License-Identifier: GPL-2.0
/*
 * fault injection support for nvme.
 *
 * Copyright (c) 2018, Oracle and/or its affiliates
 */

#include <linux/moduleparam.h>
#include "nvme.h"

static DECLARE_FAULT_ATTR(fail_default_attr);
/* optional fault injection attributes boot time option:
 * nvme_core.fail_request=<interval>,<probability>,<space>,<times>
 */
static char *fail_request;
module_param(fail_request, charp, 0000);

void nvme_fault_inject_init(struct nvme_fault_inject *fault_inj,
			    const char *dev_name)
{
	struct dentry *dir, *parent;
	struct fault_attr *attr = &fault_inj->attr;

	/* set default fault injection attribute */
	if (fail_request)
		setup_fault_attr(&fail_default_attr, fail_request);

	/* create debugfs directory and attribute */
	parent = debugfs_create_dir(dev_name, NULL);
	if (IS_ERR(parent)) {
		pr_warn("%s: failed to create debugfs directory\n", dev_name);
		return;
	}

	*attr = fail_default_attr;
	dir = fault_create_debugfs_attr("fault_inject", parent, attr);
	if (IS_ERR(dir)) {
		pr_warn("%s: failed to create debugfs attr\n", dev_name);
		debugfs_remove_recursive(parent);
		return;
	}
	fault_inj->parent = parent;

	/* create debugfs for status code and dont_retry */
	fault_inj->status = NVME_SC_INVALID_OPCODE;
	fault_inj->dont_retry = true;
	debugfs_create_x16("status", 0600, dir, &fault_inj->status);
	debugfs_create_bool("dont_retry", 0600, dir, &fault_inj->dont_retry);
}

void nvme_fault_inject_fini(struct nvme_fault_inject *fault_inject)
{
	/* remove debugfs directories */
	debugfs_remove_recursive(fault_inject->parent);
}

void nvme_should_fail(struct request *req)
{
	struct gendisk *disk = req->q->disk;
	struct nvme_fault_inject *fault_inject = NULL;
	u16 status;

	if (disk) {
		struct nvme_ns *ns = disk->private_data;

		if (ns)
			fault_inject = &ns->fault_inject;
		else
			WARN_ONCE(1, "No namespace found for request\n");
	} else {
		fault_inject = &nvme_req(req)->ctrl->fault_inject;
	}

	if (fault_inject && should_fail(&fault_inject->attr, 1)) {
		/* inject status code and DNR bit */
		status = fault_inject->status;

		if (fault_inject->dont_retry)
			status |= NVME_SC_DNR;

		nvme_req(req)->status = status;
	}
}
EXPORT_SYMBOL_GPL(nvme_should_fail);
linux-master
drivers/nvme/host/fault_inject.c
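The init function above only creates debugfs knobs; arming them is left to user space. A hypothetical helper that writes those attributes is sketched below. The directory path and the generic attribute names (probability, times) are assumptions based on the kernel's common fault-injection framework; status and dont_retry are the two attributes created in this file:

/*
 * Hypothetical sketch: arm the nvme fault_inject knobs from user space.
 * Assumes debugfs mounted at /sys/kernel/debug and a target named
 * nvme0n1; both the path and the 0x2 status value are illustrative.
 */
#include <stdio.h>

static int write_attr(const char *dir, const char *name, const char *val)
{
	char path[256];
	FILE *f;

	snprintf(path, sizeof(path), "%s/%s", dir, name);
	f = fopen(path, "w");
	if (!f) {
		perror(path);
		return -1;
	}
	fputs(val, f);
	fclose(f);
	return 0;
}

int main(void)
{
	const char *dir = "/sys/kernel/debug/nvme0n1/fault_inject";	/* assumed path */

	write_attr(dir, "probability", "100");	/* fail every candidate request */
	write_attr(dir, "times", "10");		/* ...but only ten times total */
	write_attr(dir, "status", "0x2");	/* status code to inject (example value) */
	write_attr(dir, "dont_retry", "Y");	/* also set the DNR bit */
	return 0;
}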
// SPDX-License-Identifier: GPL-2.0 /* * NVM Express device driver verbose errors * Copyright (c) 2022, Oracle and/or its affiliates */ #include "nvme.h" static const char * const nvme_ops[] = { [nvme_cmd_flush] = "Flush", [nvme_cmd_write] = "Write", [nvme_cmd_read] = "Read", [nvme_cmd_write_uncor] = "Write Uncorrectable", [nvme_cmd_compare] = "Compare", [nvme_cmd_write_zeroes] = "Write Zeroes", [nvme_cmd_dsm] = "Dataset Management", [nvme_cmd_verify] = "Verify", [nvme_cmd_resv_register] = "Reservation Register", [nvme_cmd_resv_report] = "Reservation Report", [nvme_cmd_resv_acquire] = "Reservation Acquire", [nvme_cmd_resv_release] = "Reservation Release", [nvme_cmd_zone_mgmt_send] = "Zone Management Send", [nvme_cmd_zone_mgmt_recv] = "Zone Management Receive", [nvme_cmd_zone_append] = "Zone Append", }; static const char * const nvme_admin_ops[] = { [nvme_admin_delete_sq] = "Delete SQ", [nvme_admin_create_sq] = "Create SQ", [nvme_admin_get_log_page] = "Get Log Page", [nvme_admin_delete_cq] = "Delete CQ", [nvme_admin_create_cq] = "Create CQ", [nvme_admin_identify] = "Identify", [nvme_admin_abort_cmd] = "Abort Command", [nvme_admin_set_features] = "Set Features", [nvme_admin_get_features] = "Get Features", [nvme_admin_async_event] = "Async Event", [nvme_admin_ns_mgmt] = "Namespace Management", [nvme_admin_activate_fw] = "Activate Firmware", [nvme_admin_download_fw] = "Download Firmware", [nvme_admin_dev_self_test] = "Device Self Test", [nvme_admin_ns_attach] = "Namespace Attach", [nvme_admin_keep_alive] = "Keep Alive", [nvme_admin_directive_send] = "Directive Send", [nvme_admin_directive_recv] = "Directive Receive", [nvme_admin_virtual_mgmt] = "Virtual Management", [nvme_admin_nvme_mi_send] = "NVMe Send MI", [nvme_admin_nvme_mi_recv] = "NVMe Receive MI", [nvme_admin_dbbuf] = "Doorbell Buffer Config", [nvme_admin_format_nvm] = "Format NVM", [nvme_admin_security_send] = "Security Send", [nvme_admin_security_recv] = "Security Receive", [nvme_admin_sanitize_nvm] = "Sanitize NVM", [nvme_admin_get_lba_status] = "Get LBA Status", }; static const char * const nvme_fabrics_ops[] = { [nvme_fabrics_type_property_set] = "Property Set", [nvme_fabrics_type_property_get] = "Property Get", [nvme_fabrics_type_connect] = "Connect", [nvme_fabrics_type_auth_send] = "Authentication Send", [nvme_fabrics_type_auth_receive] = "Authentication Receive", }; static const char * const nvme_statuses[] = { [NVME_SC_SUCCESS] = "Success", [NVME_SC_INVALID_OPCODE] = "Invalid Command Opcode", [NVME_SC_INVALID_FIELD] = "Invalid Field in Command", [NVME_SC_CMDID_CONFLICT] = "Command ID Conflict", [NVME_SC_DATA_XFER_ERROR] = "Data Transfer Error", [NVME_SC_POWER_LOSS] = "Commands Aborted due to Power Loss Notification", [NVME_SC_INTERNAL] = "Internal Error", [NVME_SC_ABORT_REQ] = "Command Abort Requested", [NVME_SC_ABORT_QUEUE] = "Command Aborted due to SQ Deletion", [NVME_SC_FUSED_FAIL] = "Command Aborted due to Failed Fused Command", [NVME_SC_FUSED_MISSING] = "Command Aborted due to Missing Fused Command", [NVME_SC_INVALID_NS] = "Invalid Namespace or Format", [NVME_SC_CMD_SEQ_ERROR] = "Command Sequence Error", [NVME_SC_SGL_INVALID_LAST] = "Invalid SGL Segment Descriptor", [NVME_SC_SGL_INVALID_COUNT] = "Invalid Number of SGL Descriptors", [NVME_SC_SGL_INVALID_DATA] = "Data SGL Length Invalid", [NVME_SC_SGL_INVALID_METADATA] = "Metadata SGL Length Invalid", [NVME_SC_SGL_INVALID_TYPE] = "SGL Descriptor Type Invalid", [NVME_SC_CMB_INVALID_USE] = "Invalid Use of Controller Memory Buffer", [NVME_SC_PRP_INVALID_OFFSET] = "PRP Offset 
Invalid", [NVME_SC_ATOMIC_WU_EXCEEDED] = "Atomic Write Unit Exceeded", [NVME_SC_OP_DENIED] = "Operation Denied", [NVME_SC_SGL_INVALID_OFFSET] = "SGL Offset Invalid", [NVME_SC_RESERVED] = "Reserved", [NVME_SC_HOST_ID_INCONSIST] = "Host Identifier Inconsistent Format", [NVME_SC_KA_TIMEOUT_EXPIRED] = "Keep Alive Timeout Expired", [NVME_SC_KA_TIMEOUT_INVALID] = "Keep Alive Timeout Invalid", [NVME_SC_ABORTED_PREEMPT_ABORT] = "Command Aborted due to Preempt and Abort", [NVME_SC_SANITIZE_FAILED] = "Sanitize Failed", [NVME_SC_SANITIZE_IN_PROGRESS] = "Sanitize In Progress", [NVME_SC_SGL_INVALID_GRANULARITY] = "SGL Data Block Granularity Invalid", [NVME_SC_CMD_NOT_SUP_CMB_QUEUE] = "Command Not Supported for Queue in CMB", [NVME_SC_NS_WRITE_PROTECTED] = "Namespace is Write Protected", [NVME_SC_CMD_INTERRUPTED] = "Command Interrupted", [NVME_SC_TRANSIENT_TR_ERR] = "Transient Transport Error", [NVME_SC_ADMIN_COMMAND_MEDIA_NOT_READY] = "Admin Command Media Not Ready", [NVME_SC_INVALID_IO_CMD_SET] = "Invalid IO Command Set", [NVME_SC_LBA_RANGE] = "LBA Out of Range", [NVME_SC_CAP_EXCEEDED] = "Capacity Exceeded", [NVME_SC_NS_NOT_READY] = "Namespace Not Ready", [NVME_SC_RESERVATION_CONFLICT] = "Reservation Conflict", [NVME_SC_FORMAT_IN_PROGRESS] = "Format In Progress", [NVME_SC_CQ_INVALID] = "Completion Queue Invalid", [NVME_SC_QID_INVALID] = "Invalid Queue Identifier", [NVME_SC_QUEUE_SIZE] = "Invalid Queue Size", [NVME_SC_ABORT_LIMIT] = "Abort Command Limit Exceeded", [NVME_SC_ABORT_MISSING] = "Reserved", /* XXX */ [NVME_SC_ASYNC_LIMIT] = "Asynchronous Event Request Limit Exceeded", [NVME_SC_FIRMWARE_SLOT] = "Invalid Firmware Slot", [NVME_SC_FIRMWARE_IMAGE] = "Invalid Firmware Image", [NVME_SC_INVALID_VECTOR] = "Invalid Interrupt Vector", [NVME_SC_INVALID_LOG_PAGE] = "Invalid Log Page", [NVME_SC_INVALID_FORMAT] = "Invalid Format", [NVME_SC_FW_NEEDS_CONV_RESET] = "Firmware Activation Requires Conventional Reset", [NVME_SC_INVALID_QUEUE] = "Invalid Queue Deletion", [NVME_SC_FEATURE_NOT_SAVEABLE] = "Feature Identifier Not Saveable", [NVME_SC_FEATURE_NOT_CHANGEABLE] = "Feature Not Changeable", [NVME_SC_FEATURE_NOT_PER_NS] = "Feature Not Namespace Specific", [NVME_SC_FW_NEEDS_SUBSYS_RESET] = "Firmware Activation Requires NVM Subsystem Reset", [NVME_SC_FW_NEEDS_RESET] = "Firmware Activation Requires Reset", [NVME_SC_FW_NEEDS_MAX_TIME] = "Firmware Activation Requires Maximum Time Violation", [NVME_SC_FW_ACTIVATE_PROHIBITED] = "Firmware Activation Prohibited", [NVME_SC_OVERLAPPING_RANGE] = "Overlapping Range", [NVME_SC_NS_INSUFFICIENT_CAP] = "Namespace Insufficient Capacity", [NVME_SC_NS_ID_UNAVAILABLE] = "Namespace Identifier Unavailable", [NVME_SC_NS_ALREADY_ATTACHED] = "Namespace Already Attached", [NVME_SC_NS_IS_PRIVATE] = "Namespace Is Private", [NVME_SC_NS_NOT_ATTACHED] = "Namespace Not Attached", [NVME_SC_THIN_PROV_NOT_SUPP] = "Thin Provisioning Not Supported", [NVME_SC_CTRL_LIST_INVALID] = "Controller List Invalid", [NVME_SC_SELT_TEST_IN_PROGRESS] = "Device Self-test In Progress", [NVME_SC_BP_WRITE_PROHIBITED] = "Boot Partition Write Prohibited", [NVME_SC_CTRL_ID_INVALID] = "Invalid Controller Identifier", [NVME_SC_SEC_CTRL_STATE_INVALID] = "Invalid Secondary Controller State", [NVME_SC_CTRL_RES_NUM_INVALID] = "Invalid Number of Controller Resources", [NVME_SC_RES_ID_INVALID] = "Invalid Resource Identifier", [NVME_SC_PMR_SAN_PROHIBITED] = "Sanitize Prohibited", [NVME_SC_ANA_GROUP_ID_INVALID] = "ANA Group Identifier Invalid", [NVME_SC_ANA_ATTACH_FAILED] = "ANA Attach Failed", [NVME_SC_BAD_ATTRIBUTES] = 
"Conflicting Attributes", [NVME_SC_INVALID_PI] = "Invalid Protection Information", [NVME_SC_READ_ONLY] = "Attempted Write to Read Only Range", [NVME_SC_ONCS_NOT_SUPPORTED] = "ONCS Not Supported", [NVME_SC_ZONE_BOUNDARY_ERROR] = "Zoned Boundary Error", [NVME_SC_ZONE_FULL] = "Zone Is Full", [NVME_SC_ZONE_READ_ONLY] = "Zone Is Read Only", [NVME_SC_ZONE_OFFLINE] = "Zone Is Offline", [NVME_SC_ZONE_INVALID_WRITE] = "Zone Invalid Write", [NVME_SC_ZONE_TOO_MANY_ACTIVE] = "Too Many Active Zones", [NVME_SC_ZONE_TOO_MANY_OPEN] = "Too Many Open Zones", [NVME_SC_ZONE_INVALID_TRANSITION] = "Invalid Zone State Transition", [NVME_SC_WRITE_FAULT] = "Write Fault", [NVME_SC_READ_ERROR] = "Unrecovered Read Error", [NVME_SC_GUARD_CHECK] = "End-to-end Guard Check Error", [NVME_SC_APPTAG_CHECK] = "End-to-end Application Tag Check Error", [NVME_SC_REFTAG_CHECK] = "End-to-end Reference Tag Check Error", [NVME_SC_COMPARE_FAILED] = "Compare Failure", [NVME_SC_ACCESS_DENIED] = "Access Denied", [NVME_SC_UNWRITTEN_BLOCK] = "Deallocated or Unwritten Logical Block", [NVME_SC_INTERNAL_PATH_ERROR] = "Internal Pathing Error", [NVME_SC_ANA_PERSISTENT_LOSS] = "Asymmetric Access Persistent Loss", [NVME_SC_ANA_INACCESSIBLE] = "Asymmetric Access Inaccessible", [NVME_SC_ANA_TRANSITION] = "Asymmetric Access Transition", [NVME_SC_CTRL_PATH_ERROR] = "Controller Pathing Error", [NVME_SC_HOST_PATH_ERROR] = "Host Pathing Error", [NVME_SC_HOST_ABORTED_CMD] = "Host Aborted Command", }; const unsigned char *nvme_get_error_status_str(u16 status) { status &= 0x7ff; if (status < ARRAY_SIZE(nvme_statuses) && nvme_statuses[status]) return nvme_statuses[status & 0x7ff]; return "Unknown"; } const unsigned char *nvme_get_opcode_str(u8 opcode) { if (opcode < ARRAY_SIZE(nvme_ops) && nvme_ops[opcode]) return nvme_ops[opcode]; return "Unknown"; } EXPORT_SYMBOL_GPL(nvme_get_opcode_str); const unsigned char *nvme_get_admin_opcode_str(u8 opcode) { if (opcode < ARRAY_SIZE(nvme_admin_ops) && nvme_admin_ops[opcode]) return nvme_admin_ops[opcode]; return "Unknown"; } EXPORT_SYMBOL_GPL(nvme_get_admin_opcode_str); const unsigned char *nvme_get_fabrics_opcode_str(u8 opcode) { if (opcode < ARRAY_SIZE(nvme_fabrics_ops) && nvme_fabrics_ops[opcode]) return nvme_fabrics_ops[opcode]; return "Unknown"; } EXPORT_SYMBOL_GPL(nvme_get_fabrics_opcode_str);
linux-master
drivers/nvme/host/constants.c
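The lookup helpers above depend on how C designated initializers behave: entries not named stay NULL and the array is sized by the largest index used, so a bounds check plus a NULL check covers both out-of-range and unassigned opcodes. A small standalone illustration (the indices are illustrative only):

/* Sketch of the sparse-table lookup pattern used by nvme_get_opcode_str(). */
#include <stdio.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

static const char * const demo_ops[] = {
	[0x00] = "Flush",
	[0x01] = "Write",
	[0x02] = "Read",
	[0x7d] = "Zone Append",		/* everything in between stays NULL */
};

static const char *demo_opcode_str(unsigned char opcode)
{
	/* bounds check first, then NULL check for the gaps */
	if (opcode < ARRAY_SIZE(demo_ops) && demo_ops[opcode])
		return demo_ops[opcode];
	return "Unknown";
}

int main(void)
{
	printf("0x02 -> %s\n", demo_opcode_str(0x02));	/* Read */
	printf("0x10 -> %s\n", demo_opcode_str(0x10));	/* Unknown (gap) */
	printf("0xff -> %s\n", demo_opcode_str(0xff));	/* Unknown (out of range) */
	return 0;
}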
// SPDX-License-Identifier: GPL-2.0 /* * Copyright (c) 2020 Hannes Reinecke, SUSE Linux */ #include <linux/crc32.h> #include <linux/base64.h> #include <linux/prandom.h> #include <asm/unaligned.h> #include <crypto/hash.h> #include <crypto/dh.h> #include "nvme.h" #include "fabrics.h" #include <linux/nvme-auth.h> #define CHAP_BUF_SIZE 4096 static struct kmem_cache *nvme_chap_buf_cache; static mempool_t *nvme_chap_buf_pool; struct nvme_dhchap_queue_context { struct list_head entry; struct work_struct auth_work; struct nvme_ctrl *ctrl; struct crypto_shash *shash_tfm; struct crypto_kpp *dh_tfm; void *buf; int qid; int error; u32 s1; u32 s2; u16 transaction; u8 status; u8 dhgroup_id; u8 hash_id; size_t hash_len; u8 c1[64]; u8 c2[64]; u8 response[64]; u8 *host_response; u8 *ctrl_key; u8 *host_key; u8 *sess_key; int ctrl_key_len; int host_key_len; int sess_key_len; }; static struct workqueue_struct *nvme_auth_wq; #define nvme_auth_flags_from_qid(qid) \ (qid == 0) ? 0 : BLK_MQ_REQ_NOWAIT | BLK_MQ_REQ_RESERVED #define nvme_auth_queue_from_qid(ctrl, qid) \ (qid == 0) ? (ctrl)->fabrics_q : (ctrl)->connect_q static inline int ctrl_max_dhchaps(struct nvme_ctrl *ctrl) { return ctrl->opts->nr_io_queues + ctrl->opts->nr_write_queues + ctrl->opts->nr_poll_queues + 1; } static int nvme_auth_submit(struct nvme_ctrl *ctrl, int qid, void *data, size_t data_len, bool auth_send) { struct nvme_command cmd = {}; blk_mq_req_flags_t flags = nvme_auth_flags_from_qid(qid); struct request_queue *q = nvme_auth_queue_from_qid(ctrl, qid); int ret; cmd.auth_common.opcode = nvme_fabrics_command; cmd.auth_common.secp = NVME_AUTH_DHCHAP_PROTOCOL_IDENTIFIER; cmd.auth_common.spsp0 = 0x01; cmd.auth_common.spsp1 = 0x01; if (auth_send) { cmd.auth_send.fctype = nvme_fabrics_type_auth_send; cmd.auth_send.tl = cpu_to_le32(data_len); } else { cmd.auth_receive.fctype = nvme_fabrics_type_auth_receive; cmd.auth_receive.al = cpu_to_le32(data_len); } ret = __nvme_submit_sync_cmd(q, &cmd, NULL, data, data_len, qid == 0 ? 
NVME_QID_ANY : qid, 0, flags); if (ret > 0) dev_warn(ctrl->device, "qid %d auth_send failed with status %d\n", qid, ret); else if (ret < 0) dev_err(ctrl->device, "qid %d auth_send failed with error %d\n", qid, ret); return ret; } static int nvme_auth_receive_validate(struct nvme_ctrl *ctrl, int qid, struct nvmf_auth_dhchap_failure_data *data, u16 transaction, u8 expected_msg) { dev_dbg(ctrl->device, "%s: qid %d auth_type %d auth_id %x\n", __func__, qid, data->auth_type, data->auth_id); if (data->auth_type == NVME_AUTH_COMMON_MESSAGES && data->auth_id == NVME_AUTH_DHCHAP_MESSAGE_FAILURE1) { return data->rescode_exp; } if (data->auth_type != NVME_AUTH_DHCHAP_MESSAGES || data->auth_id != expected_msg) { dev_warn(ctrl->device, "qid %d invalid message %02x/%02x\n", qid, data->auth_type, data->auth_id); return NVME_AUTH_DHCHAP_FAILURE_INCORRECT_MESSAGE; } if (le16_to_cpu(data->t_id) != transaction) { dev_warn(ctrl->device, "qid %d invalid transaction ID %d\n", qid, le16_to_cpu(data->t_id)); return NVME_AUTH_DHCHAP_FAILURE_INCORRECT_MESSAGE; } return 0; } static int nvme_auth_set_dhchap_negotiate_data(struct nvme_ctrl *ctrl, struct nvme_dhchap_queue_context *chap) { struct nvmf_auth_dhchap_negotiate_data *data = chap->buf; size_t size = sizeof(*data) + sizeof(union nvmf_auth_protocol); if (size > CHAP_BUF_SIZE) { chap->status = NVME_AUTH_DHCHAP_FAILURE_INCORRECT_PAYLOAD; return -EINVAL; } memset((u8 *)chap->buf, 0, size); data->auth_type = NVME_AUTH_COMMON_MESSAGES; data->auth_id = NVME_AUTH_DHCHAP_MESSAGE_NEGOTIATE; data->t_id = cpu_to_le16(chap->transaction); data->sc_c = 0; /* No secure channel concatenation */ data->napd = 1; data->auth_protocol[0].dhchap.authid = NVME_AUTH_DHCHAP_AUTH_ID; data->auth_protocol[0].dhchap.halen = 3; data->auth_protocol[0].dhchap.dhlen = 6; data->auth_protocol[0].dhchap.idlist[0] = NVME_AUTH_HASH_SHA256; data->auth_protocol[0].dhchap.idlist[1] = NVME_AUTH_HASH_SHA384; data->auth_protocol[0].dhchap.idlist[2] = NVME_AUTH_HASH_SHA512; data->auth_protocol[0].dhchap.idlist[30] = NVME_AUTH_DHGROUP_NULL; data->auth_protocol[0].dhchap.idlist[31] = NVME_AUTH_DHGROUP_2048; data->auth_protocol[0].dhchap.idlist[32] = NVME_AUTH_DHGROUP_3072; data->auth_protocol[0].dhchap.idlist[33] = NVME_AUTH_DHGROUP_4096; data->auth_protocol[0].dhchap.idlist[34] = NVME_AUTH_DHGROUP_6144; data->auth_protocol[0].dhchap.idlist[35] = NVME_AUTH_DHGROUP_8192; return size; } static int nvme_auth_process_dhchap_challenge(struct nvme_ctrl *ctrl, struct nvme_dhchap_queue_context *chap) { struct nvmf_auth_dhchap_challenge_data *data = chap->buf; u16 dhvlen = le16_to_cpu(data->dhvlen); size_t size = sizeof(*data) + data->hl + dhvlen; const char *gid_name = nvme_auth_dhgroup_name(data->dhgid); const char *hmac_name, *kpp_name; if (size > CHAP_BUF_SIZE) { chap->status = NVME_AUTH_DHCHAP_FAILURE_INCORRECT_PAYLOAD; return -EINVAL; } hmac_name = nvme_auth_hmac_name(data->hashid); if (!hmac_name) { dev_warn(ctrl->device, "qid %d: invalid HASH ID %d\n", chap->qid, data->hashid); chap->status = NVME_AUTH_DHCHAP_FAILURE_HASH_UNUSABLE; return -EPROTO; } if (chap->hash_id == data->hashid && chap->shash_tfm && !strcmp(crypto_shash_alg_name(chap->shash_tfm), hmac_name) && crypto_shash_digestsize(chap->shash_tfm) == data->hl) { dev_dbg(ctrl->device, "qid %d: reuse existing hash %s\n", chap->qid, hmac_name); goto select_kpp; } /* Reset if hash cannot be reused */ if (chap->shash_tfm) { crypto_free_shash(chap->shash_tfm); chap->hash_id = 0; chap->hash_len = 0; } chap->shash_tfm = crypto_alloc_shash(hmac_name, 0, 
CRYPTO_ALG_ALLOCATES_MEMORY); if (IS_ERR(chap->shash_tfm)) { dev_warn(ctrl->device, "qid %d: failed to allocate hash %s, error %ld\n", chap->qid, hmac_name, PTR_ERR(chap->shash_tfm)); chap->shash_tfm = NULL; chap->status = NVME_AUTH_DHCHAP_FAILURE_FAILED; return -ENOMEM; } if (crypto_shash_digestsize(chap->shash_tfm) != data->hl) { dev_warn(ctrl->device, "qid %d: invalid hash length %d\n", chap->qid, data->hl); crypto_free_shash(chap->shash_tfm); chap->shash_tfm = NULL; chap->status = NVME_AUTH_DHCHAP_FAILURE_HASH_UNUSABLE; return -EPROTO; } chap->hash_id = data->hashid; chap->hash_len = data->hl; dev_dbg(ctrl->device, "qid %d: selected hash %s\n", chap->qid, hmac_name); select_kpp: kpp_name = nvme_auth_dhgroup_kpp(data->dhgid); if (!kpp_name) { dev_warn(ctrl->device, "qid %d: invalid DH group id %d\n", chap->qid, data->dhgid); chap->status = NVME_AUTH_DHCHAP_FAILURE_DHGROUP_UNUSABLE; /* Leave previous dh_tfm intact */ return -EPROTO; } if (chap->dhgroup_id == data->dhgid && (data->dhgid == NVME_AUTH_DHGROUP_NULL || chap->dh_tfm)) { dev_dbg(ctrl->device, "qid %d: reuse existing DH group %s\n", chap->qid, gid_name); goto skip_kpp; } /* Reset dh_tfm if it can't be reused */ if (chap->dh_tfm) { crypto_free_kpp(chap->dh_tfm); chap->dh_tfm = NULL; } if (data->dhgid != NVME_AUTH_DHGROUP_NULL) { if (dhvlen == 0) { dev_warn(ctrl->device, "qid %d: empty DH value\n", chap->qid); chap->status = NVME_AUTH_DHCHAP_FAILURE_DHGROUP_UNUSABLE; return -EPROTO; } chap->dh_tfm = crypto_alloc_kpp(kpp_name, 0, 0); if (IS_ERR(chap->dh_tfm)) { int ret = PTR_ERR(chap->dh_tfm); dev_warn(ctrl->device, "qid %d: error %d initializing DH group %s\n", chap->qid, ret, gid_name); chap->status = NVME_AUTH_DHCHAP_FAILURE_DHGROUP_UNUSABLE; chap->dh_tfm = NULL; return ret; } dev_dbg(ctrl->device, "qid %d: selected DH group %s\n", chap->qid, gid_name); } else if (dhvlen != 0) { dev_warn(ctrl->device, "qid %d: invalid DH value for NULL DH\n", chap->qid); chap->status = NVME_AUTH_DHCHAP_FAILURE_INCORRECT_PAYLOAD; return -EPROTO; } chap->dhgroup_id = data->dhgid; skip_kpp: chap->s1 = le32_to_cpu(data->seqnum); memcpy(chap->c1, data->cval, chap->hash_len); if (dhvlen) { chap->ctrl_key = kmalloc(dhvlen, GFP_KERNEL); if (!chap->ctrl_key) { chap->status = NVME_AUTH_DHCHAP_FAILURE_FAILED; return -ENOMEM; } chap->ctrl_key_len = dhvlen; memcpy(chap->ctrl_key, data->cval + chap->hash_len, dhvlen); dev_dbg(ctrl->device, "ctrl public key %*ph\n", (int)chap->ctrl_key_len, chap->ctrl_key); } return 0; } static int nvme_auth_set_dhchap_reply_data(struct nvme_ctrl *ctrl, struct nvme_dhchap_queue_context *chap) { struct nvmf_auth_dhchap_reply_data *data = chap->buf; size_t size = sizeof(*data); size += 2 * chap->hash_len; if (chap->host_key_len) size += chap->host_key_len; if (size > CHAP_BUF_SIZE) { chap->status = NVME_AUTH_DHCHAP_FAILURE_INCORRECT_PAYLOAD; return -EINVAL; } memset(chap->buf, 0, size); data->auth_type = NVME_AUTH_DHCHAP_MESSAGES; data->auth_id = NVME_AUTH_DHCHAP_MESSAGE_REPLY; data->t_id = cpu_to_le16(chap->transaction); data->hl = chap->hash_len; data->dhvlen = cpu_to_le16(chap->host_key_len); memcpy(data->rval, chap->response, chap->hash_len); if (ctrl->ctrl_key) { get_random_bytes(chap->c2, chap->hash_len); data->cvalid = 1; chap->s2 = nvme_auth_get_seqnum(); memcpy(data->rval + chap->hash_len, chap->c2, chap->hash_len); dev_dbg(ctrl->device, "%s: qid %d ctrl challenge %*ph\n", __func__, chap->qid, (int)chap->hash_len, chap->c2); } else { memset(chap->c2, 0, chap->hash_len); chap->s2 = 0; } data->seqnum = 
cpu_to_le32(chap->s2); if (chap->host_key_len) { dev_dbg(ctrl->device, "%s: qid %d host public key %*ph\n", __func__, chap->qid, chap->host_key_len, chap->host_key); memcpy(data->rval + 2 * chap->hash_len, chap->host_key, chap->host_key_len); } return size; } static int nvme_auth_process_dhchap_success1(struct nvme_ctrl *ctrl, struct nvme_dhchap_queue_context *chap) { struct nvmf_auth_dhchap_success1_data *data = chap->buf; size_t size = sizeof(*data); if (chap->ctrl_key) size += chap->hash_len; if (size > CHAP_BUF_SIZE) { chap->status = NVME_AUTH_DHCHAP_FAILURE_INCORRECT_PAYLOAD; return -EINVAL; } if (data->hl != chap->hash_len) { dev_warn(ctrl->device, "qid %d: invalid hash length %u\n", chap->qid, data->hl); chap->status = NVME_AUTH_DHCHAP_FAILURE_HASH_UNUSABLE; return -EPROTO; } /* Just print out information for the admin queue */ if (chap->qid == 0) dev_info(ctrl->device, "qid 0: authenticated with hash %s dhgroup %s\n", nvme_auth_hmac_name(chap->hash_id), nvme_auth_dhgroup_name(chap->dhgroup_id)); if (!data->rvalid) return 0; /* Validate controller response */ if (memcmp(chap->response, data->rval, data->hl)) { dev_dbg(ctrl->device, "%s: qid %d ctrl response %*ph\n", __func__, chap->qid, (int)chap->hash_len, data->rval); dev_dbg(ctrl->device, "%s: qid %d host response %*ph\n", __func__, chap->qid, (int)chap->hash_len, chap->response); dev_warn(ctrl->device, "qid %d: controller authentication failed\n", chap->qid); chap->status = NVME_AUTH_DHCHAP_FAILURE_FAILED; return -ECONNREFUSED; } /* Just print out information for the admin queue */ if (chap->qid == 0) dev_info(ctrl->device, "qid 0: controller authenticated\n"); return 0; } static int nvme_auth_set_dhchap_success2_data(struct nvme_ctrl *ctrl, struct nvme_dhchap_queue_context *chap) { struct nvmf_auth_dhchap_success2_data *data = chap->buf; size_t size = sizeof(*data); memset(chap->buf, 0, size); data->auth_type = NVME_AUTH_DHCHAP_MESSAGES; data->auth_id = NVME_AUTH_DHCHAP_MESSAGE_SUCCESS2; data->t_id = cpu_to_le16(chap->transaction); return size; } static int nvme_auth_set_dhchap_failure2_data(struct nvme_ctrl *ctrl, struct nvme_dhchap_queue_context *chap) { struct nvmf_auth_dhchap_failure_data *data = chap->buf; size_t size = sizeof(*data); memset(chap->buf, 0, size); data->auth_type = NVME_AUTH_COMMON_MESSAGES; data->auth_id = NVME_AUTH_DHCHAP_MESSAGE_FAILURE2; data->t_id = cpu_to_le16(chap->transaction); data->rescode = NVME_AUTH_DHCHAP_FAILURE_REASON_FAILED; data->rescode_exp = chap->status; return size; } static int nvme_auth_dhchap_setup_host_response(struct nvme_ctrl *ctrl, struct nvme_dhchap_queue_context *chap) { SHASH_DESC_ON_STACK(shash, chap->shash_tfm); u8 buf[4], *challenge = chap->c1; int ret; dev_dbg(ctrl->device, "%s: qid %d host response seq %u transaction %d\n", __func__, chap->qid, chap->s1, chap->transaction); if (!chap->host_response) { chap->host_response = nvme_auth_transform_key(ctrl->host_key, ctrl->opts->host->nqn); if (IS_ERR(chap->host_response)) { ret = PTR_ERR(chap->host_response); chap->host_response = NULL; return ret; } } else { dev_dbg(ctrl->device, "%s: qid %d re-using host response\n", __func__, chap->qid); } ret = crypto_shash_setkey(chap->shash_tfm, chap->host_response, ctrl->host_key->len); if (ret) { dev_warn(ctrl->device, "qid %d: failed to set key, error %d\n", chap->qid, ret); goto out; } if (chap->dh_tfm) { challenge = kmalloc(chap->hash_len, GFP_KERNEL); if (!challenge) { ret = -ENOMEM; goto out; } ret = nvme_auth_augmented_challenge(chap->hash_id, chap->sess_key, chap->sess_key_len, 
chap->c1, challenge, chap->hash_len); if (ret) goto out; } shash->tfm = chap->shash_tfm; ret = crypto_shash_init(shash); if (ret) goto out; ret = crypto_shash_update(shash, challenge, chap->hash_len); if (ret) goto out; put_unaligned_le32(chap->s1, buf); ret = crypto_shash_update(shash, buf, 4); if (ret) goto out; put_unaligned_le16(chap->transaction, buf); ret = crypto_shash_update(shash, buf, 2); if (ret) goto out; memset(buf, 0, sizeof(buf)); ret = crypto_shash_update(shash, buf, 1); if (ret) goto out; ret = crypto_shash_update(shash, "HostHost", 8); if (ret) goto out; ret = crypto_shash_update(shash, ctrl->opts->host->nqn, strlen(ctrl->opts->host->nqn)); if (ret) goto out; ret = crypto_shash_update(shash, buf, 1); if (ret) goto out; ret = crypto_shash_update(shash, ctrl->opts->subsysnqn, strlen(ctrl->opts->subsysnqn)); if (ret) goto out; ret = crypto_shash_final(shash, chap->response); out: if (challenge != chap->c1) kfree(challenge); return ret; } static int nvme_auth_dhchap_setup_ctrl_response(struct nvme_ctrl *ctrl, struct nvme_dhchap_queue_context *chap) { SHASH_DESC_ON_STACK(shash, chap->shash_tfm); u8 *ctrl_response; u8 buf[4], *challenge = chap->c2; int ret; ctrl_response = nvme_auth_transform_key(ctrl->ctrl_key, ctrl->opts->subsysnqn); if (IS_ERR(ctrl_response)) { ret = PTR_ERR(ctrl_response); return ret; } ret = crypto_shash_setkey(chap->shash_tfm, ctrl_response, ctrl->ctrl_key->len); if (ret) { dev_warn(ctrl->device, "qid %d: failed to set key, error %d\n", chap->qid, ret); goto out; } if (chap->dh_tfm) { challenge = kmalloc(chap->hash_len, GFP_KERNEL); if (!challenge) { ret = -ENOMEM; goto out; } ret = nvme_auth_augmented_challenge(chap->hash_id, chap->sess_key, chap->sess_key_len, chap->c2, challenge, chap->hash_len); if (ret) goto out; } dev_dbg(ctrl->device, "%s: qid %d ctrl response seq %u transaction %d\n", __func__, chap->qid, chap->s2, chap->transaction); dev_dbg(ctrl->device, "%s: qid %d challenge %*ph\n", __func__, chap->qid, (int)chap->hash_len, challenge); dev_dbg(ctrl->device, "%s: qid %d subsysnqn %s\n", __func__, chap->qid, ctrl->opts->subsysnqn); dev_dbg(ctrl->device, "%s: qid %d hostnqn %s\n", __func__, chap->qid, ctrl->opts->host->nqn); shash->tfm = chap->shash_tfm; ret = crypto_shash_init(shash); if (ret) goto out; ret = crypto_shash_update(shash, challenge, chap->hash_len); if (ret) goto out; put_unaligned_le32(chap->s2, buf); ret = crypto_shash_update(shash, buf, 4); if (ret) goto out; put_unaligned_le16(chap->transaction, buf); ret = crypto_shash_update(shash, buf, 2); if (ret) goto out; memset(buf, 0, 4); ret = crypto_shash_update(shash, buf, 1); if (ret) goto out; ret = crypto_shash_update(shash, "Controller", 10); if (ret) goto out; ret = crypto_shash_update(shash, ctrl->opts->subsysnqn, strlen(ctrl->opts->subsysnqn)); if (ret) goto out; ret = crypto_shash_update(shash, buf, 1); if (ret) goto out; ret = crypto_shash_update(shash, ctrl->opts->host->nqn, strlen(ctrl->opts->host->nqn)); if (ret) goto out; ret = crypto_shash_final(shash, chap->response); out: if (challenge != chap->c2) kfree(challenge); kfree(ctrl_response); return ret; } static int nvme_auth_dhchap_exponential(struct nvme_ctrl *ctrl, struct nvme_dhchap_queue_context *chap) { int ret; if (chap->host_key && chap->host_key_len) { dev_dbg(ctrl->device, "qid %d: reusing host key\n", chap->qid); goto gen_sesskey; } ret = nvme_auth_gen_privkey(chap->dh_tfm, chap->dhgroup_id); if (ret < 0) { chap->status = NVME_AUTH_DHCHAP_FAILURE_INCORRECT_PAYLOAD; return ret; } chap->host_key_len = 
crypto_kpp_maxsize(chap->dh_tfm); chap->host_key = kzalloc(chap->host_key_len, GFP_KERNEL); if (!chap->host_key) { chap->host_key_len = 0; chap->status = NVME_AUTH_DHCHAP_FAILURE_FAILED; return -ENOMEM; } ret = nvme_auth_gen_pubkey(chap->dh_tfm, chap->host_key, chap->host_key_len); if (ret) { dev_dbg(ctrl->device, "failed to generate public key, error %d\n", ret); chap->status = NVME_AUTH_DHCHAP_FAILURE_INCORRECT_PAYLOAD; return ret; } gen_sesskey: chap->sess_key_len = chap->host_key_len; chap->sess_key = kmalloc(chap->sess_key_len, GFP_KERNEL); if (!chap->sess_key) { chap->sess_key_len = 0; chap->status = NVME_AUTH_DHCHAP_FAILURE_FAILED; return -ENOMEM; } ret = nvme_auth_gen_shared_secret(chap->dh_tfm, chap->ctrl_key, chap->ctrl_key_len, chap->sess_key, chap->sess_key_len); if (ret) { dev_dbg(ctrl->device, "failed to generate shared secret, error %d\n", ret); chap->status = NVME_AUTH_DHCHAP_FAILURE_INCORRECT_PAYLOAD; return ret; } dev_dbg(ctrl->device, "shared secret %*ph\n", (int)chap->sess_key_len, chap->sess_key); return 0; } static void nvme_auth_reset_dhchap(struct nvme_dhchap_queue_context *chap) { kfree_sensitive(chap->host_response); chap->host_response = NULL; kfree_sensitive(chap->host_key); chap->host_key = NULL; chap->host_key_len = 0; kfree_sensitive(chap->ctrl_key); chap->ctrl_key = NULL; chap->ctrl_key_len = 0; kfree_sensitive(chap->sess_key); chap->sess_key = NULL; chap->sess_key_len = 0; chap->status = 0; chap->error = 0; chap->s1 = 0; chap->s2 = 0; chap->transaction = 0; memset(chap->c1, 0, sizeof(chap->c1)); memset(chap->c2, 0, sizeof(chap->c2)); mempool_free(chap->buf, nvme_chap_buf_pool); chap->buf = NULL; } static void nvme_auth_free_dhchap(struct nvme_dhchap_queue_context *chap) { nvme_auth_reset_dhchap(chap); if (chap->shash_tfm) crypto_free_shash(chap->shash_tfm); if (chap->dh_tfm) crypto_free_kpp(chap->dh_tfm); } static void nvme_queue_auth_work(struct work_struct *work) { struct nvme_dhchap_queue_context *chap = container_of(work, struct nvme_dhchap_queue_context, auth_work); struct nvme_ctrl *ctrl = chap->ctrl; size_t tl; int ret = 0; /* * Allocate a large enough buffer for the entire negotiation: * 4k is enough to ffdhe8192. */ chap->buf = mempool_alloc(nvme_chap_buf_pool, GFP_KERNEL); if (!chap->buf) { chap->error = -ENOMEM; return; } chap->transaction = ctrl->transaction++; /* DH-HMAC-CHAP Step 1: send negotiate */ dev_dbg(ctrl->device, "%s: qid %d send negotiate\n", __func__, chap->qid); ret = nvme_auth_set_dhchap_negotiate_data(ctrl, chap); if (ret < 0) { chap->error = ret; return; } tl = ret; ret = nvme_auth_submit(ctrl, chap->qid, chap->buf, tl, true); if (ret) { chap->error = ret; return; } /* DH-HMAC-CHAP Step 2: receive challenge */ dev_dbg(ctrl->device, "%s: qid %d receive challenge\n", __func__, chap->qid); memset(chap->buf, 0, CHAP_BUF_SIZE); ret = nvme_auth_submit(ctrl, chap->qid, chap->buf, CHAP_BUF_SIZE, false); if (ret) { dev_warn(ctrl->device, "qid %d failed to receive challenge, %s %d\n", chap->qid, ret < 0 ? 
"error" : "nvme status", ret); chap->error = ret; return; } ret = nvme_auth_receive_validate(ctrl, chap->qid, chap->buf, chap->transaction, NVME_AUTH_DHCHAP_MESSAGE_CHALLENGE); if (ret) { chap->status = ret; chap->error = -ECONNREFUSED; return; } ret = nvme_auth_process_dhchap_challenge(ctrl, chap); if (ret) { /* Invalid challenge parameters */ chap->error = ret; goto fail2; } if (chap->ctrl_key_len) { dev_dbg(ctrl->device, "%s: qid %d DH exponential\n", __func__, chap->qid); ret = nvme_auth_dhchap_exponential(ctrl, chap); if (ret) { chap->error = ret; goto fail2; } } dev_dbg(ctrl->device, "%s: qid %d host response\n", __func__, chap->qid); mutex_lock(&ctrl->dhchap_auth_mutex); ret = nvme_auth_dhchap_setup_host_response(ctrl, chap); if (ret) { mutex_unlock(&ctrl->dhchap_auth_mutex); chap->error = ret; goto fail2; } mutex_unlock(&ctrl->dhchap_auth_mutex); /* DH-HMAC-CHAP Step 3: send reply */ dev_dbg(ctrl->device, "%s: qid %d send reply\n", __func__, chap->qid); ret = nvme_auth_set_dhchap_reply_data(ctrl, chap); if (ret < 0) { chap->error = ret; goto fail2; } tl = ret; ret = nvme_auth_submit(ctrl, chap->qid, chap->buf, tl, true); if (ret) { chap->error = ret; goto fail2; } /* DH-HMAC-CHAP Step 4: receive success1 */ dev_dbg(ctrl->device, "%s: qid %d receive success1\n", __func__, chap->qid); memset(chap->buf, 0, CHAP_BUF_SIZE); ret = nvme_auth_submit(ctrl, chap->qid, chap->buf, CHAP_BUF_SIZE, false); if (ret) { dev_warn(ctrl->device, "qid %d failed to receive success1, %s %d\n", chap->qid, ret < 0 ? "error" : "nvme status", ret); chap->error = ret; return; } ret = nvme_auth_receive_validate(ctrl, chap->qid, chap->buf, chap->transaction, NVME_AUTH_DHCHAP_MESSAGE_SUCCESS1); if (ret) { chap->status = ret; chap->error = -ECONNREFUSED; return; } mutex_lock(&ctrl->dhchap_auth_mutex); if (ctrl->ctrl_key) { dev_dbg(ctrl->device, "%s: qid %d controller response\n", __func__, chap->qid); ret = nvme_auth_dhchap_setup_ctrl_response(ctrl, chap); if (ret) { mutex_unlock(&ctrl->dhchap_auth_mutex); chap->error = ret; goto fail2; } } mutex_unlock(&ctrl->dhchap_auth_mutex); ret = nvme_auth_process_dhchap_success1(ctrl, chap); if (ret) { /* Controller authentication failed */ chap->error = -ECONNREFUSED; goto fail2; } if (chap->ctrl_key) { /* DH-HMAC-CHAP Step 5: send success2 */ dev_dbg(ctrl->device, "%s: qid %d send success2\n", __func__, chap->qid); tl = nvme_auth_set_dhchap_success2_data(ctrl, chap); ret = nvme_auth_submit(ctrl, chap->qid, chap->buf, tl, true); if (ret) chap->error = ret; } if (!ret) { chap->error = 0; return; } fail2: dev_dbg(ctrl->device, "%s: qid %d send failure2, status %x\n", __func__, chap->qid, chap->status); tl = nvme_auth_set_dhchap_failure2_data(ctrl, chap); ret = nvme_auth_submit(ctrl, chap->qid, chap->buf, tl, true); /* * only update error if send failure2 failed and no other * error had been set during authentication. 
*/ if (ret && !chap->error) chap->error = ret; } int nvme_auth_negotiate(struct nvme_ctrl *ctrl, int qid) { struct nvme_dhchap_queue_context *chap; if (!ctrl->host_key) { dev_warn(ctrl->device, "qid %d: no key\n", qid); return -ENOKEY; } if (ctrl->opts->dhchap_ctrl_secret && !ctrl->ctrl_key) { dev_warn(ctrl->device, "qid %d: invalid ctrl key\n", qid); return -ENOKEY; } chap = &ctrl->dhchap_ctxs[qid]; cancel_work_sync(&chap->auth_work); queue_work(nvme_auth_wq, &chap->auth_work); return 0; } EXPORT_SYMBOL_GPL(nvme_auth_negotiate); int nvme_auth_wait(struct nvme_ctrl *ctrl, int qid) { struct nvme_dhchap_queue_context *chap; int ret; chap = &ctrl->dhchap_ctxs[qid]; flush_work(&chap->auth_work); ret = chap->error; /* clear sensitive info */ nvme_auth_reset_dhchap(chap); return ret; } EXPORT_SYMBOL_GPL(nvme_auth_wait); static void nvme_ctrl_auth_work(struct work_struct *work) { struct nvme_ctrl *ctrl = container_of(work, struct nvme_ctrl, dhchap_auth_work); int ret, q; /* * If the ctrl is no connected, bail as reconnect will handle * authentication. */ if (ctrl->state != NVME_CTRL_LIVE) return; /* Authenticate admin queue first */ ret = nvme_auth_negotiate(ctrl, 0); if (ret) { dev_warn(ctrl->device, "qid 0: error %d setting up authentication\n", ret); return; } ret = nvme_auth_wait(ctrl, 0); if (ret) { dev_warn(ctrl->device, "qid 0: authentication failed\n"); return; } for (q = 1; q < ctrl->queue_count; q++) { ret = nvme_auth_negotiate(ctrl, q); if (ret) { dev_warn(ctrl->device, "qid %d: error %d setting up authentication\n", q, ret); break; } } /* * Failure is a soft-state; credentials remain valid until * the controller terminates the connection. */ for (q = 1; q < ctrl->queue_count; q++) { ret = nvme_auth_wait(ctrl, q); if (ret) dev_warn(ctrl->device, "qid %d: authentication failed\n", q); } } int nvme_auth_init_ctrl(struct nvme_ctrl *ctrl) { struct nvme_dhchap_queue_context *chap; int i, ret; mutex_init(&ctrl->dhchap_auth_mutex); INIT_WORK(&ctrl->dhchap_auth_work, nvme_ctrl_auth_work); if (!ctrl->opts) return 0; ret = nvme_auth_generate_key(ctrl->opts->dhchap_secret, &ctrl->host_key); if (ret) return ret; ret = nvme_auth_generate_key(ctrl->opts->dhchap_ctrl_secret, &ctrl->ctrl_key); if (ret) goto err_free_dhchap_secret; if (!ctrl->opts->dhchap_secret && !ctrl->opts->dhchap_ctrl_secret) return 0; ctrl->dhchap_ctxs = kvcalloc(ctrl_max_dhchaps(ctrl), sizeof(*chap), GFP_KERNEL); if (!ctrl->dhchap_ctxs) { ret = -ENOMEM; goto err_free_dhchap_ctrl_secret; } for (i = 0; i < ctrl_max_dhchaps(ctrl); i++) { chap = &ctrl->dhchap_ctxs[i]; chap->qid = i; chap->ctrl = ctrl; INIT_WORK(&chap->auth_work, nvme_queue_auth_work); } return 0; err_free_dhchap_ctrl_secret: nvme_auth_free_key(ctrl->ctrl_key); ctrl->ctrl_key = NULL; err_free_dhchap_secret: nvme_auth_free_key(ctrl->host_key); ctrl->host_key = NULL; return ret; } EXPORT_SYMBOL_GPL(nvme_auth_init_ctrl); void nvme_auth_stop(struct nvme_ctrl *ctrl) { cancel_work_sync(&ctrl->dhchap_auth_work); } EXPORT_SYMBOL_GPL(nvme_auth_stop); void nvme_auth_free(struct nvme_ctrl *ctrl) { int i; if (ctrl->dhchap_ctxs) { for (i = 0; i < ctrl_max_dhchaps(ctrl); i++) nvme_auth_free_dhchap(&ctrl->dhchap_ctxs[i]); kfree(ctrl->dhchap_ctxs); } if (ctrl->host_key) { nvme_auth_free_key(ctrl->host_key); ctrl->host_key = NULL; } if (ctrl->ctrl_key) { nvme_auth_free_key(ctrl->ctrl_key); ctrl->ctrl_key = NULL; } } EXPORT_SYMBOL_GPL(nvme_auth_free); int __init nvme_init_auth(void) { nvme_auth_wq = alloc_workqueue("nvme-auth-wq", WQ_UNBOUND | WQ_MEM_RECLAIM | WQ_SYSFS, 0); if 
(!nvme_auth_wq) return -ENOMEM; nvme_chap_buf_cache = kmem_cache_create("nvme-chap-buf-cache", CHAP_BUF_SIZE, 0, SLAB_HWCACHE_ALIGN, NULL); if (!nvme_chap_buf_cache) goto err_destroy_workqueue; nvme_chap_buf_pool = mempool_create(16, mempool_alloc_slab, mempool_free_slab, nvme_chap_buf_cache); if (!nvme_chap_buf_pool) goto err_destroy_chap_buf_cache; return 0; err_destroy_chap_buf_cache: kmem_cache_destroy(nvme_chap_buf_cache); err_destroy_workqueue: destroy_workqueue(nvme_auth_wq); return -ENOMEM; } void __exit nvme_exit_auth(void) { mempool_destroy(nvme_chap_buf_pool); kmem_cache_destroy(nvme_chap_buf_cache); destroy_workqueue(nvme_auth_wq); }
linux-master
drivers/nvme/host/auth.c
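The tail of auth.c above (nvme_init_auth() and nvme_exit_auth()) uses the common kernel idiom of allocating a workqueue, a slab cache and a mempool in order, unwinding with gotos when a later allocation fails, and tearing everything down in reverse order on exit. A minimal stand-alone sketch of that idiom follows; it is only an illustration, not part of the kernel sources, and every demo_* identifier is hypothetical. Only the real workqueue/slab/mempool APIs are used.

/*
 * Minimal sketch of the allocate-then-unwind init/exit pattern.
 * All demo_* names are hypothetical; the APIs are the real ones.
 */
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/mempool.h>
#include <linux/workqueue.h>

#define DEMO_BUF_SIZE	4096

static struct workqueue_struct *demo_wq;
static struct kmem_cache *demo_buf_cache;
static mempool_t *demo_buf_pool;

static int __init demo_init(void)
{
	/* Unbound workqueue so work items are not pinned to the submitting CPU. */
	demo_wq = alloc_workqueue("demo-wq", WQ_UNBOUND | WQ_MEM_RECLAIM, 0);
	if (!demo_wq)
		return -ENOMEM;

	demo_buf_cache = kmem_cache_create("demo-buf-cache", DEMO_BUF_SIZE,
					   0, SLAB_HWCACHE_ALIGN, NULL);
	if (!demo_buf_cache)
		goto err_destroy_workqueue;

	/* Keep 16 buffers reserved so allocations can make progress under memory pressure. */
	demo_buf_pool = mempool_create(16, mempool_alloc_slab,
				       mempool_free_slab, demo_buf_cache);
	if (!demo_buf_pool)
		goto err_destroy_cache;

	return 0;

err_destroy_cache:
	kmem_cache_destroy(demo_buf_cache);
err_destroy_workqueue:
	destroy_workqueue(demo_wq);
	return -ENOMEM;
}

static void __exit demo_exit(void)
{
	/* Tear down in the reverse order of allocation. */
	mempool_destroy(demo_buf_pool);
	kmem_cache_destroy(demo_buf_cache);
	destroy_workqueue(demo_wq);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");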
// SPDX-License-Identifier: GPL-2.0 /* * Sysfs interface for the NVMe core driver. * * Copyright (c) 2011-2014, Intel Corporation. */ #include <linux/nvme-auth.h> #include "nvme.h" #include "fabrics.h" static ssize_t nvme_sysfs_reset(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct nvme_ctrl *ctrl = dev_get_drvdata(dev); int ret; ret = nvme_reset_ctrl_sync(ctrl); if (ret < 0) return ret; return count; } static DEVICE_ATTR(reset_controller, S_IWUSR, NULL, nvme_sysfs_reset); static ssize_t nvme_sysfs_rescan(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct nvme_ctrl *ctrl = dev_get_drvdata(dev); nvme_queue_scan(ctrl); return count; } static DEVICE_ATTR(rescan_controller, S_IWUSR, NULL, nvme_sysfs_rescan); static inline struct nvme_ns_head *dev_to_ns_head(struct device *dev) { struct gendisk *disk = dev_to_disk(dev); if (disk->fops == &nvme_bdev_ops) return nvme_get_ns_from_dev(dev)->head; else return disk->private_data; } static ssize_t wwid_show(struct device *dev, struct device_attribute *attr, char *buf) { struct nvme_ns_head *head = dev_to_ns_head(dev); struct nvme_ns_ids *ids = &head->ids; struct nvme_subsystem *subsys = head->subsys; int serial_len = sizeof(subsys->serial); int model_len = sizeof(subsys->model); if (!uuid_is_null(&ids->uuid)) return sysfs_emit(buf, "uuid.%pU\n", &ids->uuid); if (memchr_inv(ids->nguid, 0, sizeof(ids->nguid))) return sysfs_emit(buf, "eui.%16phN\n", ids->nguid); if (memchr_inv(ids->eui64, 0, sizeof(ids->eui64))) return sysfs_emit(buf, "eui.%8phN\n", ids->eui64); while (serial_len > 0 && (subsys->serial[serial_len - 1] == ' ' || subsys->serial[serial_len - 1] == '\0')) serial_len--; while (model_len > 0 && (subsys->model[model_len - 1] == ' ' || subsys->model[model_len - 1] == '\0')) model_len--; return sysfs_emit(buf, "nvme.%04x-%*phN-%*phN-%08x\n", subsys->vendor_id, serial_len, subsys->serial, model_len, subsys->model, head->ns_id); } static DEVICE_ATTR_RO(wwid); static ssize_t nguid_show(struct device *dev, struct device_attribute *attr, char *buf) { return sysfs_emit(buf, "%pU\n", dev_to_ns_head(dev)->ids.nguid); } static DEVICE_ATTR_RO(nguid); static ssize_t uuid_show(struct device *dev, struct device_attribute *attr, char *buf) { struct nvme_ns_ids *ids = &dev_to_ns_head(dev)->ids; /* For backward compatibility expose the NGUID to userspace if * we have no UUID set */ if (uuid_is_null(&ids->uuid)) { dev_warn_once(dev, "No UUID available providing old NGUID\n"); return sysfs_emit(buf, "%pU\n", ids->nguid); } return sysfs_emit(buf, "%pU\n", &ids->uuid); } static DEVICE_ATTR_RO(uuid); static ssize_t eui_show(struct device *dev, struct device_attribute *attr, char *buf) { return sysfs_emit(buf, "%8ph\n", dev_to_ns_head(dev)->ids.eui64); } static DEVICE_ATTR_RO(eui); static ssize_t nsid_show(struct device *dev, struct device_attribute *attr, char *buf) { return sysfs_emit(buf, "%d\n", dev_to_ns_head(dev)->ns_id); } static DEVICE_ATTR_RO(nsid); static struct attribute *nvme_ns_id_attrs[] = { &dev_attr_wwid.attr, &dev_attr_uuid.attr, &dev_attr_nguid.attr, &dev_attr_eui.attr, &dev_attr_nsid.attr, #ifdef CONFIG_NVME_MULTIPATH &dev_attr_ana_grpid.attr, &dev_attr_ana_state.attr, #endif NULL, }; static umode_t nvme_ns_id_attrs_are_visible(struct kobject *kobj, struct attribute *a, int n) { struct device *dev = container_of(kobj, struct device, kobj); struct nvme_ns_ids *ids = &dev_to_ns_head(dev)->ids; if (a == &dev_attr_uuid.attr) { if (uuid_is_null(&ids->uuid) && 
!memchr_inv(ids->nguid, 0, sizeof(ids->nguid))) return 0; } if (a == &dev_attr_nguid.attr) { if (!memchr_inv(ids->nguid, 0, sizeof(ids->nguid))) return 0; } if (a == &dev_attr_eui.attr) { if (!memchr_inv(ids->eui64, 0, sizeof(ids->eui64))) return 0; } #ifdef CONFIG_NVME_MULTIPATH if (a == &dev_attr_ana_grpid.attr || a == &dev_attr_ana_state.attr) { if (dev_to_disk(dev)->fops != &nvme_bdev_ops) /* per-path attr */ return 0; if (!nvme_ctrl_use_ana(nvme_get_ns_from_dev(dev)->ctrl)) return 0; } #endif return a->mode; } static const struct attribute_group nvme_ns_id_attr_group = { .attrs = nvme_ns_id_attrs, .is_visible = nvme_ns_id_attrs_are_visible, }; const struct attribute_group *nvme_ns_id_attr_groups[] = { &nvme_ns_id_attr_group, NULL, }; #define nvme_show_str_function(field) \ static ssize_t field##_show(struct device *dev, \ struct device_attribute *attr, char *buf) \ { \ struct nvme_ctrl *ctrl = dev_get_drvdata(dev); \ return sysfs_emit(buf, "%.*s\n", \ (int)sizeof(ctrl->subsys->field), ctrl->subsys->field); \ } \ static DEVICE_ATTR(field, S_IRUGO, field##_show, NULL); nvme_show_str_function(model); nvme_show_str_function(serial); nvme_show_str_function(firmware_rev); #define nvme_show_int_function(field) \ static ssize_t field##_show(struct device *dev, \ struct device_attribute *attr, char *buf) \ { \ struct nvme_ctrl *ctrl = dev_get_drvdata(dev); \ return sysfs_emit(buf, "%d\n", ctrl->field); \ } \ static DEVICE_ATTR(field, S_IRUGO, field##_show, NULL); nvme_show_int_function(cntlid); nvme_show_int_function(numa_node); nvme_show_int_function(queue_count); nvme_show_int_function(sqsize); nvme_show_int_function(kato); static ssize_t nvme_sysfs_delete(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct nvme_ctrl *ctrl = dev_get_drvdata(dev); if (!test_bit(NVME_CTRL_STARTED_ONCE, &ctrl->flags)) return -EBUSY; if (device_remove_file_self(dev, attr)) nvme_delete_ctrl_sync(ctrl); return count; } static DEVICE_ATTR(delete_controller, S_IWUSR, NULL, nvme_sysfs_delete); static ssize_t nvme_sysfs_show_transport(struct device *dev, struct device_attribute *attr, char *buf) { struct nvme_ctrl *ctrl = dev_get_drvdata(dev); return sysfs_emit(buf, "%s\n", ctrl->ops->name); } static DEVICE_ATTR(transport, S_IRUGO, nvme_sysfs_show_transport, NULL); static ssize_t nvme_sysfs_show_state(struct device *dev, struct device_attribute *attr, char *buf) { struct nvme_ctrl *ctrl = dev_get_drvdata(dev); static const char *const state_name[] = { [NVME_CTRL_NEW] = "new", [NVME_CTRL_LIVE] = "live", [NVME_CTRL_RESETTING] = "resetting", [NVME_CTRL_CONNECTING] = "connecting", [NVME_CTRL_DELETING] = "deleting", [NVME_CTRL_DELETING_NOIO]= "deleting (no IO)", [NVME_CTRL_DEAD] = "dead", }; if ((unsigned)ctrl->state < ARRAY_SIZE(state_name) && state_name[ctrl->state]) return sysfs_emit(buf, "%s\n", state_name[ctrl->state]); return sysfs_emit(buf, "unknown state\n"); } static DEVICE_ATTR(state, S_IRUGO, nvme_sysfs_show_state, NULL); static ssize_t nvme_sysfs_show_subsysnqn(struct device *dev, struct device_attribute *attr, char *buf) { struct nvme_ctrl *ctrl = dev_get_drvdata(dev); return sysfs_emit(buf, "%s\n", ctrl->subsys->subnqn); } static DEVICE_ATTR(subsysnqn, S_IRUGO, nvme_sysfs_show_subsysnqn, NULL); static ssize_t nvme_sysfs_show_hostnqn(struct device *dev, struct device_attribute *attr, char *buf) { struct nvme_ctrl *ctrl = dev_get_drvdata(dev); return sysfs_emit(buf, "%s\n", ctrl->opts->host->nqn); } static DEVICE_ATTR(hostnqn, S_IRUGO, nvme_sysfs_show_hostnqn, NULL); 
static ssize_t nvme_sysfs_show_hostid(struct device *dev, struct device_attribute *attr, char *buf) { struct nvme_ctrl *ctrl = dev_get_drvdata(dev); return sysfs_emit(buf, "%pU\n", &ctrl->opts->host->id); } static DEVICE_ATTR(hostid, S_IRUGO, nvme_sysfs_show_hostid, NULL); static ssize_t nvme_sysfs_show_address(struct device *dev, struct device_attribute *attr, char *buf) { struct nvme_ctrl *ctrl = dev_get_drvdata(dev); return ctrl->ops->get_address(ctrl, buf, PAGE_SIZE); } static DEVICE_ATTR(address, S_IRUGO, nvme_sysfs_show_address, NULL); static ssize_t nvme_ctrl_loss_tmo_show(struct device *dev, struct device_attribute *attr, char *buf) { struct nvme_ctrl *ctrl = dev_get_drvdata(dev); struct nvmf_ctrl_options *opts = ctrl->opts; if (ctrl->opts->max_reconnects == -1) return sysfs_emit(buf, "off\n"); return sysfs_emit(buf, "%d\n", opts->max_reconnects * opts->reconnect_delay); } static ssize_t nvme_ctrl_loss_tmo_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct nvme_ctrl *ctrl = dev_get_drvdata(dev); struct nvmf_ctrl_options *opts = ctrl->opts; int ctrl_loss_tmo, err; err = kstrtoint(buf, 10, &ctrl_loss_tmo); if (err) return -EINVAL; if (ctrl_loss_tmo < 0) opts->max_reconnects = -1; else opts->max_reconnects = DIV_ROUND_UP(ctrl_loss_tmo, opts->reconnect_delay); return count; } static DEVICE_ATTR(ctrl_loss_tmo, S_IRUGO | S_IWUSR, nvme_ctrl_loss_tmo_show, nvme_ctrl_loss_tmo_store); static ssize_t nvme_ctrl_reconnect_delay_show(struct device *dev, struct device_attribute *attr, char *buf) { struct nvme_ctrl *ctrl = dev_get_drvdata(dev); if (ctrl->opts->reconnect_delay == -1) return sysfs_emit(buf, "off\n"); return sysfs_emit(buf, "%d\n", ctrl->opts->reconnect_delay); } static ssize_t nvme_ctrl_reconnect_delay_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct nvme_ctrl *ctrl = dev_get_drvdata(dev); unsigned int v; int err; err = kstrtou32(buf, 10, &v); if (err) return err; ctrl->opts->reconnect_delay = v; return count; } static DEVICE_ATTR(reconnect_delay, S_IRUGO | S_IWUSR, nvme_ctrl_reconnect_delay_show, nvme_ctrl_reconnect_delay_store); static ssize_t nvme_ctrl_fast_io_fail_tmo_show(struct device *dev, struct device_attribute *attr, char *buf) { struct nvme_ctrl *ctrl = dev_get_drvdata(dev); if (ctrl->opts->fast_io_fail_tmo == -1) return sysfs_emit(buf, "off\n"); return sysfs_emit(buf, "%d\n", ctrl->opts->fast_io_fail_tmo); } static ssize_t nvme_ctrl_fast_io_fail_tmo_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct nvme_ctrl *ctrl = dev_get_drvdata(dev); struct nvmf_ctrl_options *opts = ctrl->opts; int fast_io_fail_tmo, err; err = kstrtoint(buf, 10, &fast_io_fail_tmo); if (err) return -EINVAL; if (fast_io_fail_tmo < 0) opts->fast_io_fail_tmo = -1; else opts->fast_io_fail_tmo = fast_io_fail_tmo; return count; } static DEVICE_ATTR(fast_io_fail_tmo, S_IRUGO | S_IWUSR, nvme_ctrl_fast_io_fail_tmo_show, nvme_ctrl_fast_io_fail_tmo_store); static ssize_t cntrltype_show(struct device *dev, struct device_attribute *attr, char *buf) { static const char * const type[] = { [NVME_CTRL_IO] = "io\n", [NVME_CTRL_DISC] = "discovery\n", [NVME_CTRL_ADMIN] = "admin\n", }; struct nvme_ctrl *ctrl = dev_get_drvdata(dev); if (ctrl->cntrltype > NVME_CTRL_ADMIN || !type[ctrl->cntrltype]) return sysfs_emit(buf, "reserved\n"); return sysfs_emit(buf, type[ctrl->cntrltype]); } static DEVICE_ATTR_RO(cntrltype); static ssize_t dctype_show(struct device *dev, struct device_attribute 
*attr, char *buf) { static const char * const type[] = { [NVME_DCTYPE_NOT_REPORTED] = "none\n", [NVME_DCTYPE_DDC] = "ddc\n", [NVME_DCTYPE_CDC] = "cdc\n", }; struct nvme_ctrl *ctrl = dev_get_drvdata(dev); if (ctrl->dctype > NVME_DCTYPE_CDC || !type[ctrl->dctype]) return sysfs_emit(buf, "reserved\n"); return sysfs_emit(buf, type[ctrl->dctype]); } static DEVICE_ATTR_RO(dctype); #ifdef CONFIG_NVME_AUTH static ssize_t nvme_ctrl_dhchap_secret_show(struct device *dev, struct device_attribute *attr, char *buf) { struct nvme_ctrl *ctrl = dev_get_drvdata(dev); struct nvmf_ctrl_options *opts = ctrl->opts; if (!opts->dhchap_secret) return sysfs_emit(buf, "none\n"); return sysfs_emit(buf, "%s\n", opts->dhchap_secret); } static ssize_t nvme_ctrl_dhchap_secret_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct nvme_ctrl *ctrl = dev_get_drvdata(dev); struct nvmf_ctrl_options *opts = ctrl->opts; char *dhchap_secret; if (!ctrl->opts->dhchap_secret) return -EINVAL; if (count < 7) return -EINVAL; if (memcmp(buf, "DHHC-1:", 7)) return -EINVAL; dhchap_secret = kzalloc(count + 1, GFP_KERNEL); if (!dhchap_secret) return -ENOMEM; memcpy(dhchap_secret, buf, count); nvme_auth_stop(ctrl); if (strcmp(dhchap_secret, opts->dhchap_secret)) { struct nvme_dhchap_key *key, *host_key; int ret; ret = nvme_auth_generate_key(dhchap_secret, &key); if (ret) { kfree(dhchap_secret); return ret; } kfree(opts->dhchap_secret); opts->dhchap_secret = dhchap_secret; host_key = ctrl->host_key; mutex_lock(&ctrl->dhchap_auth_mutex); ctrl->host_key = key; mutex_unlock(&ctrl->dhchap_auth_mutex); nvme_auth_free_key(host_key); } else kfree(dhchap_secret); /* Start re-authentication */ dev_info(ctrl->device, "re-authenticating controller\n"); queue_work(nvme_wq, &ctrl->dhchap_auth_work); return count; } static DEVICE_ATTR(dhchap_secret, S_IRUGO | S_IWUSR, nvme_ctrl_dhchap_secret_show, nvme_ctrl_dhchap_secret_store); static ssize_t nvme_ctrl_dhchap_ctrl_secret_show(struct device *dev, struct device_attribute *attr, char *buf) { struct nvme_ctrl *ctrl = dev_get_drvdata(dev); struct nvmf_ctrl_options *opts = ctrl->opts; if (!opts->dhchap_ctrl_secret) return sysfs_emit(buf, "none\n"); return sysfs_emit(buf, "%s\n", opts->dhchap_ctrl_secret); } static ssize_t nvme_ctrl_dhchap_ctrl_secret_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct nvme_ctrl *ctrl = dev_get_drvdata(dev); struct nvmf_ctrl_options *opts = ctrl->opts; char *dhchap_secret; if (!ctrl->opts->dhchap_ctrl_secret) return -EINVAL; if (count < 7) return -EINVAL; if (memcmp(buf, "DHHC-1:", 7)) return -EINVAL; dhchap_secret = kzalloc(count + 1, GFP_KERNEL); if (!dhchap_secret) return -ENOMEM; memcpy(dhchap_secret, buf, count); nvme_auth_stop(ctrl); if (strcmp(dhchap_secret, opts->dhchap_ctrl_secret)) { struct nvme_dhchap_key *key, *ctrl_key; int ret; ret = nvme_auth_generate_key(dhchap_secret, &key); if (ret) { kfree(dhchap_secret); return ret; } kfree(opts->dhchap_ctrl_secret); opts->dhchap_ctrl_secret = dhchap_secret; ctrl_key = ctrl->ctrl_key; mutex_lock(&ctrl->dhchap_auth_mutex); ctrl->ctrl_key = key; mutex_unlock(&ctrl->dhchap_auth_mutex); nvme_auth_free_key(ctrl_key); } else kfree(dhchap_secret); /* Start re-authentication */ dev_info(ctrl->device, "re-authenticating controller\n"); queue_work(nvme_wq, &ctrl->dhchap_auth_work); return count; } static DEVICE_ATTR(dhchap_ctrl_secret, S_IRUGO | S_IWUSR, nvme_ctrl_dhchap_ctrl_secret_show, nvme_ctrl_dhchap_ctrl_secret_store); #endif static struct 
attribute *nvme_dev_attrs[] = { &dev_attr_reset_controller.attr, &dev_attr_rescan_controller.attr, &dev_attr_model.attr, &dev_attr_serial.attr, &dev_attr_firmware_rev.attr, &dev_attr_cntlid.attr, &dev_attr_delete_controller.attr, &dev_attr_transport.attr, &dev_attr_subsysnqn.attr, &dev_attr_address.attr, &dev_attr_state.attr, &dev_attr_numa_node.attr, &dev_attr_queue_count.attr, &dev_attr_sqsize.attr, &dev_attr_hostnqn.attr, &dev_attr_hostid.attr, &dev_attr_ctrl_loss_tmo.attr, &dev_attr_reconnect_delay.attr, &dev_attr_fast_io_fail_tmo.attr, &dev_attr_kato.attr, &dev_attr_cntrltype.attr, &dev_attr_dctype.attr, #ifdef CONFIG_NVME_AUTH &dev_attr_dhchap_secret.attr, &dev_attr_dhchap_ctrl_secret.attr, #endif NULL }; static umode_t nvme_dev_attrs_are_visible(struct kobject *kobj, struct attribute *a, int n) { struct device *dev = container_of(kobj, struct device, kobj); struct nvme_ctrl *ctrl = dev_get_drvdata(dev); if (a == &dev_attr_delete_controller.attr && !ctrl->ops->delete_ctrl) return 0; if (a == &dev_attr_address.attr && !ctrl->ops->get_address) return 0; if (a == &dev_attr_hostnqn.attr && !ctrl->opts) return 0; if (a == &dev_attr_hostid.attr && !ctrl->opts) return 0; if (a == &dev_attr_ctrl_loss_tmo.attr && !ctrl->opts) return 0; if (a == &dev_attr_reconnect_delay.attr && !ctrl->opts) return 0; if (a == &dev_attr_fast_io_fail_tmo.attr && !ctrl->opts) return 0; #ifdef CONFIG_NVME_AUTH if (a == &dev_attr_dhchap_secret.attr && !ctrl->opts) return 0; if (a == &dev_attr_dhchap_ctrl_secret.attr && !ctrl->opts) return 0; #endif return a->mode; } const struct attribute_group nvme_dev_attrs_group = { .attrs = nvme_dev_attrs, .is_visible = nvme_dev_attrs_are_visible, }; EXPORT_SYMBOL_GPL(nvme_dev_attrs_group); const struct attribute_group *nvme_dev_attr_groups[] = { &nvme_dev_attrs_group, NULL, }; #define SUBSYS_ATTR_RO(_name, _mode, _show) \ struct device_attribute subsys_attr_##_name = \ __ATTR(_name, _mode, _show, NULL) static ssize_t nvme_subsys_show_nqn(struct device *dev, struct device_attribute *attr, char *buf) { struct nvme_subsystem *subsys = container_of(dev, struct nvme_subsystem, dev); return sysfs_emit(buf, "%s\n", subsys->subnqn); } static SUBSYS_ATTR_RO(subsysnqn, S_IRUGO, nvme_subsys_show_nqn); static ssize_t nvme_subsys_show_type(struct device *dev, struct device_attribute *attr, char *buf) { struct nvme_subsystem *subsys = container_of(dev, struct nvme_subsystem, dev); switch (subsys->subtype) { case NVME_NQN_DISC: return sysfs_emit(buf, "discovery\n"); case NVME_NQN_NVME: return sysfs_emit(buf, "nvm\n"); default: return sysfs_emit(buf, "reserved\n"); } } static SUBSYS_ATTR_RO(subsystype, S_IRUGO, nvme_subsys_show_type); #define nvme_subsys_show_str_function(field) \ static ssize_t subsys_##field##_show(struct device *dev, \ struct device_attribute *attr, char *buf) \ { \ struct nvme_subsystem *subsys = \ container_of(dev, struct nvme_subsystem, dev); \ return sysfs_emit(buf, "%.*s\n", \ (int)sizeof(subsys->field), subsys->field); \ } \ static SUBSYS_ATTR_RO(field, S_IRUGO, subsys_##field##_show); nvme_subsys_show_str_function(model); nvme_subsys_show_str_function(serial); nvme_subsys_show_str_function(firmware_rev); static struct attribute *nvme_subsys_attrs[] = { &subsys_attr_model.attr, &subsys_attr_serial.attr, &subsys_attr_firmware_rev.attr, &subsys_attr_subsysnqn.attr, &subsys_attr_subsystype.attr, #ifdef CONFIG_NVME_MULTIPATH &subsys_attr_iopolicy.attr, #endif NULL, }; static const struct attribute_group nvme_subsys_attrs_group = { .attrs = nvme_subsys_attrs, }; const 
struct attribute_group *nvme_subsys_attrs_groups[] = { &nvme_subsys_attrs_group, NULL, };
linux-master
drivers/nvme/host/sysfs.c
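The sysfs.c record above builds its controller, namespace and subsystem attributes from the standard DEVICE_ATTR / attribute_group machinery, with an is_visible callback deciding per attribute whether it should be exposed. A minimal sketch of that pattern follows, with hypothetical names (demo_dev, threshold, demo_attr_group); it is an illustration under those assumptions, not code from the kernel tree.

/*
 * Minimal sketch of a read/write sysfs attribute plus an attribute_group
 * with an is_visible callback. All demo_* names are hypothetical.
 */
#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/sysfs.h>

struct demo_dev {
	unsigned int threshold;
	bool threshold_supported;
};

static ssize_t threshold_show(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	struct demo_dev *ddev = dev_get_drvdata(dev);

	/* sysfs_emit() bounds the output to a single page for us. */
	return sysfs_emit(buf, "%u\n", ddev->threshold);
}

static ssize_t threshold_store(struct device *dev,
			       struct device_attribute *attr,
			       const char *buf, size_t count)
{
	struct demo_dev *ddev = dev_get_drvdata(dev);
	unsigned int val;
	int err;

	err = kstrtouint(buf, 10, &val);
	if (err)
		return err;

	ddev->threshold = val;
	return count;
}
static DEVICE_ATTR_RW(threshold);

static struct attribute *demo_attrs[] = {
	&dev_attr_threshold.attr,
	NULL,
};

/* Hide the attribute entirely when the device cannot support it. */
static umode_t demo_attrs_are_visible(struct kobject *kobj,
				      struct attribute *a, int n)
{
	struct device *dev = kobj_to_dev(kobj);
	struct demo_dev *ddev = dev_get_drvdata(dev);

	if (a == &dev_attr_threshold.attr && !ddev->threshold_supported)
		return 0;
	return a->mode;
}

static const struct attribute_group demo_attr_group = {
	.attrs = demo_attrs,
	.is_visible = demo_attrs_are_visible,
};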
// SPDX-License-Identifier: GPL-2.0 /* * NVMe over Fabrics RDMA host code. * Copyright (c) 2015-2016 HGST, a Western Digital Company. */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/module.h> #include <linux/init.h> #include <linux/slab.h> #include <rdma/mr_pool.h> #include <linux/err.h> #include <linux/string.h> #include <linux/atomic.h> #include <linux/blk-mq.h> #include <linux/blk-integrity.h> #include <linux/types.h> #include <linux/list.h> #include <linux/mutex.h> #include <linux/scatterlist.h> #include <linux/nvme.h> #include <asm/unaligned.h> #include <rdma/ib_verbs.h> #include <rdma/rdma_cm.h> #include <linux/nvme-rdma.h> #include "nvme.h" #include "fabrics.h" #define NVME_RDMA_CM_TIMEOUT_MS 3000 /* 3 second */ #define NVME_RDMA_MAX_SEGMENTS 256 #define NVME_RDMA_MAX_INLINE_SEGMENTS 4 #define NVME_RDMA_DATA_SGL_SIZE \ (sizeof(struct scatterlist) * NVME_INLINE_SG_CNT) #define NVME_RDMA_METADATA_SGL_SIZE \ (sizeof(struct scatterlist) * NVME_INLINE_METADATA_SG_CNT) struct nvme_rdma_device { struct ib_device *dev; struct ib_pd *pd; struct kref ref; struct list_head entry; unsigned int num_inline_segments; }; struct nvme_rdma_qe { struct ib_cqe cqe; void *data; u64 dma; }; struct nvme_rdma_sgl { int nents; struct sg_table sg_table; }; struct nvme_rdma_queue; struct nvme_rdma_request { struct nvme_request req; struct ib_mr *mr; struct nvme_rdma_qe sqe; union nvme_result result; __le16 status; refcount_t ref; struct ib_sge sge[1 + NVME_RDMA_MAX_INLINE_SEGMENTS]; u32 num_sge; struct ib_reg_wr reg_wr; struct ib_cqe reg_cqe; struct nvme_rdma_queue *queue; struct nvme_rdma_sgl data_sgl; struct nvme_rdma_sgl *metadata_sgl; bool use_sig_mr; }; enum nvme_rdma_queue_flags { NVME_RDMA_Q_ALLOCATED = 0, NVME_RDMA_Q_LIVE = 1, NVME_RDMA_Q_TR_READY = 2, }; struct nvme_rdma_queue { struct nvme_rdma_qe *rsp_ring; int queue_size; size_t cmnd_capsule_len; struct nvme_rdma_ctrl *ctrl; struct nvme_rdma_device *device; struct ib_cq *ib_cq; struct ib_qp *qp; unsigned long flags; struct rdma_cm_id *cm_id; int cm_error; struct completion cm_done; bool pi_support; int cq_size; struct mutex queue_lock; }; struct nvme_rdma_ctrl { /* read only in the hot path */ struct nvme_rdma_queue *queues; /* other member variables */ struct blk_mq_tag_set tag_set; struct work_struct err_work; struct nvme_rdma_qe async_event_sqe; struct delayed_work reconnect_work; struct list_head list; struct blk_mq_tag_set admin_tag_set; struct nvme_rdma_device *device; u32 max_fr_pages; struct sockaddr_storage addr; struct sockaddr_storage src_addr; struct nvme_ctrl ctrl; bool use_inline_data; u32 io_queues[HCTX_MAX_TYPES]; }; static inline struct nvme_rdma_ctrl *to_rdma_ctrl(struct nvme_ctrl *ctrl) { return container_of(ctrl, struct nvme_rdma_ctrl, ctrl); } static LIST_HEAD(device_list); static DEFINE_MUTEX(device_list_mutex); static LIST_HEAD(nvme_rdma_ctrl_list); static DEFINE_MUTEX(nvme_rdma_ctrl_mutex); /* * Disabling this option makes small I/O goes faster, but is fundamentally * unsafe. With it turned off we will have to register a global rkey that * allows read and write access to all physical memory. 
*/ static bool register_always = true; module_param(register_always, bool, 0444); MODULE_PARM_DESC(register_always, "Use memory registration even for contiguous memory regions"); static int nvme_rdma_cm_handler(struct rdma_cm_id *cm_id, struct rdma_cm_event *event); static void nvme_rdma_recv_done(struct ib_cq *cq, struct ib_wc *wc); static void nvme_rdma_complete_rq(struct request *rq); static const struct blk_mq_ops nvme_rdma_mq_ops; static const struct blk_mq_ops nvme_rdma_admin_mq_ops; static inline int nvme_rdma_queue_idx(struct nvme_rdma_queue *queue) { return queue - queue->ctrl->queues; } static bool nvme_rdma_poll_queue(struct nvme_rdma_queue *queue) { return nvme_rdma_queue_idx(queue) > queue->ctrl->io_queues[HCTX_TYPE_DEFAULT] + queue->ctrl->io_queues[HCTX_TYPE_READ]; } static inline size_t nvme_rdma_inline_data_size(struct nvme_rdma_queue *queue) { return queue->cmnd_capsule_len - sizeof(struct nvme_command); } static void nvme_rdma_free_qe(struct ib_device *ibdev, struct nvme_rdma_qe *qe, size_t capsule_size, enum dma_data_direction dir) { ib_dma_unmap_single(ibdev, qe->dma, capsule_size, dir); kfree(qe->data); } static int nvme_rdma_alloc_qe(struct ib_device *ibdev, struct nvme_rdma_qe *qe, size_t capsule_size, enum dma_data_direction dir) { qe->data = kzalloc(capsule_size, GFP_KERNEL); if (!qe->data) return -ENOMEM; qe->dma = ib_dma_map_single(ibdev, qe->data, capsule_size, dir); if (ib_dma_mapping_error(ibdev, qe->dma)) { kfree(qe->data); qe->data = NULL; return -ENOMEM; } return 0; } static void nvme_rdma_free_ring(struct ib_device *ibdev, struct nvme_rdma_qe *ring, size_t ib_queue_size, size_t capsule_size, enum dma_data_direction dir) { int i; for (i = 0; i < ib_queue_size; i++) nvme_rdma_free_qe(ibdev, &ring[i], capsule_size, dir); kfree(ring); } static struct nvme_rdma_qe *nvme_rdma_alloc_ring(struct ib_device *ibdev, size_t ib_queue_size, size_t capsule_size, enum dma_data_direction dir) { struct nvme_rdma_qe *ring; int i; ring = kcalloc(ib_queue_size, sizeof(struct nvme_rdma_qe), GFP_KERNEL); if (!ring) return NULL; /* * Bind the CQEs (post recv buffers) DMA mapping to the RDMA queue * lifetime. It's safe, since any chage in the underlying RDMA device * will issue error recovery and queue re-creation. 
*/ for (i = 0; i < ib_queue_size; i++) { if (nvme_rdma_alloc_qe(ibdev, &ring[i], capsule_size, dir)) goto out_free_ring; } return ring; out_free_ring: nvme_rdma_free_ring(ibdev, ring, i, capsule_size, dir); return NULL; } static void nvme_rdma_qp_event(struct ib_event *event, void *context) { pr_debug("QP event %s (%d)\n", ib_event_msg(event->event), event->event); } static int nvme_rdma_wait_for_cm(struct nvme_rdma_queue *queue) { int ret; ret = wait_for_completion_interruptible(&queue->cm_done); if (ret) return ret; WARN_ON_ONCE(queue->cm_error > 0); return queue->cm_error; } static int nvme_rdma_create_qp(struct nvme_rdma_queue *queue, const int factor) { struct nvme_rdma_device *dev = queue->device; struct ib_qp_init_attr init_attr; int ret; memset(&init_attr, 0, sizeof(init_attr)); init_attr.event_handler = nvme_rdma_qp_event; /* +1 for drain */ init_attr.cap.max_send_wr = factor * queue->queue_size + 1; /* +1 for drain */ init_attr.cap.max_recv_wr = queue->queue_size + 1; init_attr.cap.max_recv_sge = 1; init_attr.cap.max_send_sge = 1 + dev->num_inline_segments; init_attr.sq_sig_type = IB_SIGNAL_REQ_WR; init_attr.qp_type = IB_QPT_RC; init_attr.send_cq = queue->ib_cq; init_attr.recv_cq = queue->ib_cq; if (queue->pi_support) init_attr.create_flags |= IB_QP_CREATE_INTEGRITY_EN; init_attr.qp_context = queue; ret = rdma_create_qp(queue->cm_id, dev->pd, &init_attr); queue->qp = queue->cm_id->qp; return ret; } static void nvme_rdma_exit_request(struct blk_mq_tag_set *set, struct request *rq, unsigned int hctx_idx) { struct nvme_rdma_request *req = blk_mq_rq_to_pdu(rq); kfree(req->sqe.data); } static int nvme_rdma_init_request(struct blk_mq_tag_set *set, struct request *rq, unsigned int hctx_idx, unsigned int numa_node) { struct nvme_rdma_ctrl *ctrl = to_rdma_ctrl(set->driver_data); struct nvme_rdma_request *req = blk_mq_rq_to_pdu(rq); int queue_idx = (set == &ctrl->tag_set) ? 
hctx_idx + 1 : 0; struct nvme_rdma_queue *queue = &ctrl->queues[queue_idx]; nvme_req(rq)->ctrl = &ctrl->ctrl; req->sqe.data = kzalloc(sizeof(struct nvme_command), GFP_KERNEL); if (!req->sqe.data) return -ENOMEM; /* metadata nvme_rdma_sgl struct is located after command's data SGL */ if (queue->pi_support) req->metadata_sgl = (void *)nvme_req(rq) + sizeof(struct nvme_rdma_request) + NVME_RDMA_DATA_SGL_SIZE; req->queue = queue; nvme_req(rq)->cmd = req->sqe.data; return 0; } static int nvme_rdma_init_hctx(struct blk_mq_hw_ctx *hctx, void *data, unsigned int hctx_idx) { struct nvme_rdma_ctrl *ctrl = to_rdma_ctrl(data); struct nvme_rdma_queue *queue = &ctrl->queues[hctx_idx + 1]; BUG_ON(hctx_idx >= ctrl->ctrl.queue_count); hctx->driver_data = queue; return 0; } static int nvme_rdma_init_admin_hctx(struct blk_mq_hw_ctx *hctx, void *data, unsigned int hctx_idx) { struct nvme_rdma_ctrl *ctrl = to_rdma_ctrl(data); struct nvme_rdma_queue *queue = &ctrl->queues[0]; BUG_ON(hctx_idx != 0); hctx->driver_data = queue; return 0; } static void nvme_rdma_free_dev(struct kref *ref) { struct nvme_rdma_device *ndev = container_of(ref, struct nvme_rdma_device, ref); mutex_lock(&device_list_mutex); list_del(&ndev->entry); mutex_unlock(&device_list_mutex); ib_dealloc_pd(ndev->pd); kfree(ndev); } static void nvme_rdma_dev_put(struct nvme_rdma_device *dev) { kref_put(&dev->ref, nvme_rdma_free_dev); } static int nvme_rdma_dev_get(struct nvme_rdma_device *dev) { return kref_get_unless_zero(&dev->ref); } static struct nvme_rdma_device * nvme_rdma_find_get_device(struct rdma_cm_id *cm_id) { struct nvme_rdma_device *ndev; mutex_lock(&device_list_mutex); list_for_each_entry(ndev, &device_list, entry) { if (ndev->dev->node_guid == cm_id->device->node_guid && nvme_rdma_dev_get(ndev)) goto out_unlock; } ndev = kzalloc(sizeof(*ndev), GFP_KERNEL); if (!ndev) goto out_err; ndev->dev = cm_id->device; kref_init(&ndev->ref); ndev->pd = ib_alloc_pd(ndev->dev, register_always ? 0 : IB_PD_UNSAFE_GLOBAL_RKEY); if (IS_ERR(ndev->pd)) goto out_free_dev; if (!(ndev->dev->attrs.device_cap_flags & IB_DEVICE_MEM_MGT_EXTENSIONS)) { dev_err(&ndev->dev->dev, "Memory registrations not supported.\n"); goto out_free_pd; } ndev->num_inline_segments = min(NVME_RDMA_MAX_INLINE_SEGMENTS, ndev->dev->attrs.max_send_sge - 1); list_add(&ndev->entry, &device_list); out_unlock: mutex_unlock(&device_list_mutex); return ndev; out_free_pd: ib_dealloc_pd(ndev->pd); out_free_dev: kfree(ndev); out_err: mutex_unlock(&device_list_mutex); return NULL; } static void nvme_rdma_free_cq(struct nvme_rdma_queue *queue) { if (nvme_rdma_poll_queue(queue)) ib_free_cq(queue->ib_cq); else ib_cq_pool_put(queue->ib_cq, queue->cq_size); } static void nvme_rdma_destroy_queue_ib(struct nvme_rdma_queue *queue) { struct nvme_rdma_device *dev; struct ib_device *ibdev; if (!test_and_clear_bit(NVME_RDMA_Q_TR_READY, &queue->flags)) return; dev = queue->device; ibdev = dev->dev; if (queue->pi_support) ib_mr_pool_destroy(queue->qp, &queue->qp->sig_mrs); ib_mr_pool_destroy(queue->qp, &queue->qp->rdma_mrs); /* * The cm_id object might have been destroyed during RDMA connection * establishment error flow to avoid getting other cma events, thus * the destruction of the QP shouldn't use rdma_cm API. 
*/ ib_destroy_qp(queue->qp); nvme_rdma_free_cq(queue); nvme_rdma_free_ring(ibdev, queue->rsp_ring, queue->queue_size, sizeof(struct nvme_completion), DMA_FROM_DEVICE); nvme_rdma_dev_put(dev); } static int nvme_rdma_get_max_fr_pages(struct ib_device *ibdev, bool pi_support) { u32 max_page_list_len; if (pi_support) max_page_list_len = ibdev->attrs.max_pi_fast_reg_page_list_len; else max_page_list_len = ibdev->attrs.max_fast_reg_page_list_len; return min_t(u32, NVME_RDMA_MAX_SEGMENTS, max_page_list_len - 1); } static int nvme_rdma_create_cq(struct ib_device *ibdev, struct nvme_rdma_queue *queue) { int ret, comp_vector, idx = nvme_rdma_queue_idx(queue); /* * Spread I/O queues completion vectors according their queue index. * Admin queues can always go on completion vector 0. */ comp_vector = (idx == 0 ? idx : idx - 1) % ibdev->num_comp_vectors; /* Polling queues need direct cq polling context */ if (nvme_rdma_poll_queue(queue)) queue->ib_cq = ib_alloc_cq(ibdev, queue, queue->cq_size, comp_vector, IB_POLL_DIRECT); else queue->ib_cq = ib_cq_pool_get(ibdev, queue->cq_size, comp_vector, IB_POLL_SOFTIRQ); if (IS_ERR(queue->ib_cq)) { ret = PTR_ERR(queue->ib_cq); return ret; } return 0; } static int nvme_rdma_create_queue_ib(struct nvme_rdma_queue *queue) { struct ib_device *ibdev; const int send_wr_factor = 3; /* MR, SEND, INV */ const int cq_factor = send_wr_factor + 1; /* + RECV */ int ret, pages_per_mr; queue->device = nvme_rdma_find_get_device(queue->cm_id); if (!queue->device) { dev_err(queue->cm_id->device->dev.parent, "no client data found!\n"); return -ECONNREFUSED; } ibdev = queue->device->dev; /* +1 for ib_drain_qp */ queue->cq_size = cq_factor * queue->queue_size + 1; ret = nvme_rdma_create_cq(ibdev, queue); if (ret) goto out_put_dev; ret = nvme_rdma_create_qp(queue, send_wr_factor); if (ret) goto out_destroy_ib_cq; queue->rsp_ring = nvme_rdma_alloc_ring(ibdev, queue->queue_size, sizeof(struct nvme_completion), DMA_FROM_DEVICE); if (!queue->rsp_ring) { ret = -ENOMEM; goto out_destroy_qp; } /* * Currently we don't use SG_GAPS MR's so if the first entry is * misaligned we'll end up using two entries for a single data page, * so one additional entry is required. 
*/ pages_per_mr = nvme_rdma_get_max_fr_pages(ibdev, queue->pi_support) + 1; ret = ib_mr_pool_init(queue->qp, &queue->qp->rdma_mrs, queue->queue_size, IB_MR_TYPE_MEM_REG, pages_per_mr, 0); if (ret) { dev_err(queue->ctrl->ctrl.device, "failed to initialize MR pool sized %d for QID %d\n", queue->queue_size, nvme_rdma_queue_idx(queue)); goto out_destroy_ring; } if (queue->pi_support) { ret = ib_mr_pool_init(queue->qp, &queue->qp->sig_mrs, queue->queue_size, IB_MR_TYPE_INTEGRITY, pages_per_mr, pages_per_mr); if (ret) { dev_err(queue->ctrl->ctrl.device, "failed to initialize PI MR pool sized %d for QID %d\n", queue->queue_size, nvme_rdma_queue_idx(queue)); goto out_destroy_mr_pool; } } set_bit(NVME_RDMA_Q_TR_READY, &queue->flags); return 0; out_destroy_mr_pool: ib_mr_pool_destroy(queue->qp, &queue->qp->rdma_mrs); out_destroy_ring: nvme_rdma_free_ring(ibdev, queue->rsp_ring, queue->queue_size, sizeof(struct nvme_completion), DMA_FROM_DEVICE); out_destroy_qp: rdma_destroy_qp(queue->cm_id); out_destroy_ib_cq: nvme_rdma_free_cq(queue); out_put_dev: nvme_rdma_dev_put(queue->device); return ret; } static int nvme_rdma_alloc_queue(struct nvme_rdma_ctrl *ctrl, int idx, size_t queue_size) { struct nvme_rdma_queue *queue; struct sockaddr *src_addr = NULL; int ret; queue = &ctrl->queues[idx]; mutex_init(&queue->queue_lock); queue->ctrl = ctrl; if (idx && ctrl->ctrl.max_integrity_segments) queue->pi_support = true; else queue->pi_support = false; init_completion(&queue->cm_done); if (idx > 0) queue->cmnd_capsule_len = ctrl->ctrl.ioccsz * 16; else queue->cmnd_capsule_len = sizeof(struct nvme_command); queue->queue_size = queue_size; queue->cm_id = rdma_create_id(&init_net, nvme_rdma_cm_handler, queue, RDMA_PS_TCP, IB_QPT_RC); if (IS_ERR(queue->cm_id)) { dev_info(ctrl->ctrl.device, "failed to create CM ID: %ld\n", PTR_ERR(queue->cm_id)); ret = PTR_ERR(queue->cm_id); goto out_destroy_mutex; } if (ctrl->ctrl.opts->mask & NVMF_OPT_HOST_TRADDR) src_addr = (struct sockaddr *)&ctrl->src_addr; queue->cm_error = -ETIMEDOUT; ret = rdma_resolve_addr(queue->cm_id, src_addr, (struct sockaddr *)&ctrl->addr, NVME_RDMA_CM_TIMEOUT_MS); if (ret) { dev_info(ctrl->ctrl.device, "rdma_resolve_addr failed (%d).\n", ret); goto out_destroy_cm_id; } ret = nvme_rdma_wait_for_cm(queue); if (ret) { dev_info(ctrl->ctrl.device, "rdma connection establishment failed (%d)\n", ret); goto out_destroy_cm_id; } set_bit(NVME_RDMA_Q_ALLOCATED, &queue->flags); return 0; out_destroy_cm_id: rdma_destroy_id(queue->cm_id); nvme_rdma_destroy_queue_ib(queue); out_destroy_mutex: mutex_destroy(&queue->queue_lock); return ret; } static void __nvme_rdma_stop_queue(struct nvme_rdma_queue *queue) { rdma_disconnect(queue->cm_id); ib_drain_qp(queue->qp); } static void nvme_rdma_stop_queue(struct nvme_rdma_queue *queue) { mutex_lock(&queue->queue_lock); if (test_and_clear_bit(NVME_RDMA_Q_LIVE, &queue->flags)) __nvme_rdma_stop_queue(queue); mutex_unlock(&queue->queue_lock); } static void nvme_rdma_free_queue(struct nvme_rdma_queue *queue) { if (!test_and_clear_bit(NVME_RDMA_Q_ALLOCATED, &queue->flags)) return; rdma_destroy_id(queue->cm_id); nvme_rdma_destroy_queue_ib(queue); mutex_destroy(&queue->queue_lock); } static void nvme_rdma_free_io_queues(struct nvme_rdma_ctrl *ctrl) { int i; for (i = 1; i < ctrl->ctrl.queue_count; i++) nvme_rdma_free_queue(&ctrl->queues[i]); } static void nvme_rdma_stop_io_queues(struct nvme_rdma_ctrl *ctrl) { int i; for (i = 1; i < ctrl->ctrl.queue_count; i++) nvme_rdma_stop_queue(&ctrl->queues[i]); } static int 
nvme_rdma_start_queue(struct nvme_rdma_ctrl *ctrl, int idx) { struct nvme_rdma_queue *queue = &ctrl->queues[idx]; int ret; if (idx) ret = nvmf_connect_io_queue(&ctrl->ctrl, idx); else ret = nvmf_connect_admin_queue(&ctrl->ctrl); if (!ret) { set_bit(NVME_RDMA_Q_LIVE, &queue->flags); } else { if (test_bit(NVME_RDMA_Q_ALLOCATED, &queue->flags)) __nvme_rdma_stop_queue(queue); dev_info(ctrl->ctrl.device, "failed to connect queue: %d ret=%d\n", idx, ret); } return ret; } static int nvme_rdma_start_io_queues(struct nvme_rdma_ctrl *ctrl, int first, int last) { int i, ret = 0; for (i = first; i < last; i++) { ret = nvme_rdma_start_queue(ctrl, i); if (ret) goto out_stop_queues; } return 0; out_stop_queues: for (i--; i >= first; i--) nvme_rdma_stop_queue(&ctrl->queues[i]); return ret; } static int nvme_rdma_alloc_io_queues(struct nvme_rdma_ctrl *ctrl) { struct nvmf_ctrl_options *opts = ctrl->ctrl.opts; unsigned int nr_io_queues; int i, ret; nr_io_queues = nvmf_nr_io_queues(opts); ret = nvme_set_queue_count(&ctrl->ctrl, &nr_io_queues); if (ret) return ret; if (nr_io_queues == 0) { dev_err(ctrl->ctrl.device, "unable to set any I/O queues\n"); return -ENOMEM; } ctrl->ctrl.queue_count = nr_io_queues + 1; dev_info(ctrl->ctrl.device, "creating %d I/O queues.\n", nr_io_queues); nvmf_set_io_queues(opts, nr_io_queues, ctrl->io_queues); for (i = 1; i < ctrl->ctrl.queue_count; i++) { ret = nvme_rdma_alloc_queue(ctrl, i, ctrl->ctrl.sqsize + 1); if (ret) goto out_free_queues; } return 0; out_free_queues: for (i--; i >= 1; i--) nvme_rdma_free_queue(&ctrl->queues[i]); return ret; } static int nvme_rdma_alloc_tag_set(struct nvme_ctrl *ctrl) { unsigned int cmd_size = sizeof(struct nvme_rdma_request) + NVME_RDMA_DATA_SGL_SIZE; if (ctrl->max_integrity_segments) cmd_size += sizeof(struct nvme_rdma_sgl) + NVME_RDMA_METADATA_SGL_SIZE; return nvme_alloc_io_tag_set(ctrl, &to_rdma_ctrl(ctrl)->tag_set, &nvme_rdma_mq_ops, ctrl->opts->nr_poll_queues ? HCTX_MAX_TYPES : 2, cmd_size); } static void nvme_rdma_destroy_admin_queue(struct nvme_rdma_ctrl *ctrl) { if (ctrl->async_event_sqe.data) { cancel_work_sync(&ctrl->ctrl.async_event_work); nvme_rdma_free_qe(ctrl->device->dev, &ctrl->async_event_sqe, sizeof(struct nvme_command), DMA_TO_DEVICE); ctrl->async_event_sqe.data = NULL; } nvme_rdma_free_queue(&ctrl->queues[0]); } static int nvme_rdma_configure_admin_queue(struct nvme_rdma_ctrl *ctrl, bool new) { bool pi_capable = false; int error; error = nvme_rdma_alloc_queue(ctrl, 0, NVME_AQ_DEPTH); if (error) return error; ctrl->device = ctrl->queues[0].device; ctrl->ctrl.numa_node = ibdev_to_node(ctrl->device->dev); /* T10-PI support */ if (ctrl->device->dev->attrs.kernel_cap_flags & IBK_INTEGRITY_HANDOVER) pi_capable = true; ctrl->max_fr_pages = nvme_rdma_get_max_fr_pages(ctrl->device->dev, pi_capable); /* * Bind the async event SQE DMA mapping to the admin queue lifetime. * It's safe, since any chage in the underlying RDMA device will issue * error recovery and queue re-creation. 
*/ error = nvme_rdma_alloc_qe(ctrl->device->dev, &ctrl->async_event_sqe, sizeof(struct nvme_command), DMA_TO_DEVICE); if (error) goto out_free_queue; if (new) { error = nvme_alloc_admin_tag_set(&ctrl->ctrl, &ctrl->admin_tag_set, &nvme_rdma_admin_mq_ops, sizeof(struct nvme_rdma_request) + NVME_RDMA_DATA_SGL_SIZE); if (error) goto out_free_async_qe; } error = nvme_rdma_start_queue(ctrl, 0); if (error) goto out_remove_admin_tag_set; error = nvme_enable_ctrl(&ctrl->ctrl); if (error) goto out_stop_queue; ctrl->ctrl.max_segments = ctrl->max_fr_pages; ctrl->ctrl.max_hw_sectors = ctrl->max_fr_pages << (ilog2(SZ_4K) - 9); if (pi_capable) ctrl->ctrl.max_integrity_segments = ctrl->max_fr_pages; else ctrl->ctrl.max_integrity_segments = 0; nvme_unquiesce_admin_queue(&ctrl->ctrl); error = nvme_init_ctrl_finish(&ctrl->ctrl, false); if (error) goto out_quiesce_queue; return 0; out_quiesce_queue: nvme_quiesce_admin_queue(&ctrl->ctrl); blk_sync_queue(ctrl->ctrl.admin_q); out_stop_queue: nvme_rdma_stop_queue(&ctrl->queues[0]); nvme_cancel_admin_tagset(&ctrl->ctrl); out_remove_admin_tag_set: if (new) nvme_remove_admin_tag_set(&ctrl->ctrl); out_free_async_qe: if (ctrl->async_event_sqe.data) { nvme_rdma_free_qe(ctrl->device->dev, &ctrl->async_event_sqe, sizeof(struct nvme_command), DMA_TO_DEVICE); ctrl->async_event_sqe.data = NULL; } out_free_queue: nvme_rdma_free_queue(&ctrl->queues[0]); return error; } static int nvme_rdma_configure_io_queues(struct nvme_rdma_ctrl *ctrl, bool new) { int ret, nr_queues; ret = nvme_rdma_alloc_io_queues(ctrl); if (ret) return ret; if (new) { ret = nvme_rdma_alloc_tag_set(&ctrl->ctrl); if (ret) goto out_free_io_queues; } /* * Only start IO queues for which we have allocated the tagset * and limitted it to the available queues. On reconnects, the * queue number might have changed. */ nr_queues = min(ctrl->tag_set.nr_hw_queues + 1, ctrl->ctrl.queue_count); ret = nvme_rdma_start_io_queues(ctrl, 1, nr_queues); if (ret) goto out_cleanup_tagset; if (!new) { nvme_start_freeze(&ctrl->ctrl); nvme_unquiesce_io_queues(&ctrl->ctrl); if (!nvme_wait_freeze_timeout(&ctrl->ctrl, NVME_IO_TIMEOUT)) { /* * If we timed out waiting for freeze we are likely to * be stuck. Fail the controller initialization just * to be safe. */ ret = -ENODEV; nvme_unfreeze(&ctrl->ctrl); goto out_wait_freeze_timed_out; } blk_mq_update_nr_hw_queues(ctrl->ctrl.tagset, ctrl->ctrl.queue_count - 1); nvme_unfreeze(&ctrl->ctrl); } /* * If the number of queues has increased (reconnect case) * start all new queues now. 
*/ ret = nvme_rdma_start_io_queues(ctrl, nr_queues, ctrl->tag_set.nr_hw_queues + 1); if (ret) goto out_wait_freeze_timed_out; return 0; out_wait_freeze_timed_out: nvme_quiesce_io_queues(&ctrl->ctrl); nvme_sync_io_queues(&ctrl->ctrl); nvme_rdma_stop_io_queues(ctrl); out_cleanup_tagset: nvme_cancel_tagset(&ctrl->ctrl); if (new) nvme_remove_io_tag_set(&ctrl->ctrl); out_free_io_queues: nvme_rdma_free_io_queues(ctrl); return ret; } static void nvme_rdma_teardown_admin_queue(struct nvme_rdma_ctrl *ctrl, bool remove) { nvme_quiesce_admin_queue(&ctrl->ctrl); blk_sync_queue(ctrl->ctrl.admin_q); nvme_rdma_stop_queue(&ctrl->queues[0]); nvme_cancel_admin_tagset(&ctrl->ctrl); if (remove) { nvme_unquiesce_admin_queue(&ctrl->ctrl); nvme_remove_admin_tag_set(&ctrl->ctrl); } nvme_rdma_destroy_admin_queue(ctrl); } static void nvme_rdma_teardown_io_queues(struct nvme_rdma_ctrl *ctrl, bool remove) { if (ctrl->ctrl.queue_count > 1) { nvme_quiesce_io_queues(&ctrl->ctrl); nvme_sync_io_queues(&ctrl->ctrl); nvme_rdma_stop_io_queues(ctrl); nvme_cancel_tagset(&ctrl->ctrl); if (remove) { nvme_unquiesce_io_queues(&ctrl->ctrl); nvme_remove_io_tag_set(&ctrl->ctrl); } nvme_rdma_free_io_queues(ctrl); } } static void nvme_rdma_stop_ctrl(struct nvme_ctrl *nctrl) { struct nvme_rdma_ctrl *ctrl = to_rdma_ctrl(nctrl); flush_work(&ctrl->err_work); cancel_delayed_work_sync(&ctrl->reconnect_work); } static void nvme_rdma_free_ctrl(struct nvme_ctrl *nctrl) { struct nvme_rdma_ctrl *ctrl = to_rdma_ctrl(nctrl); if (list_empty(&ctrl->list)) goto free_ctrl; mutex_lock(&nvme_rdma_ctrl_mutex); list_del(&ctrl->list); mutex_unlock(&nvme_rdma_ctrl_mutex); nvmf_free_options(nctrl->opts); free_ctrl: kfree(ctrl->queues); kfree(ctrl); } static void nvme_rdma_reconnect_or_remove(struct nvme_rdma_ctrl *ctrl) { /* If we are resetting/deleting then do nothing */ if (ctrl->ctrl.state != NVME_CTRL_CONNECTING) { WARN_ON_ONCE(ctrl->ctrl.state == NVME_CTRL_NEW || ctrl->ctrl.state == NVME_CTRL_LIVE); return; } if (nvmf_should_reconnect(&ctrl->ctrl)) { dev_info(ctrl->ctrl.device, "Reconnecting in %d seconds...\n", ctrl->ctrl.opts->reconnect_delay); queue_delayed_work(nvme_wq, &ctrl->reconnect_work, ctrl->ctrl.opts->reconnect_delay * HZ); } else { nvme_delete_ctrl(&ctrl->ctrl); } } static int nvme_rdma_setup_ctrl(struct nvme_rdma_ctrl *ctrl, bool new) { int ret; bool changed; ret = nvme_rdma_configure_admin_queue(ctrl, new); if (ret) return ret; if (ctrl->ctrl.icdoff) { ret = -EOPNOTSUPP; dev_err(ctrl->ctrl.device, "icdoff is not supported!\n"); goto destroy_admin; } if (!(ctrl->ctrl.sgls & (1 << 2))) { ret = -EOPNOTSUPP; dev_err(ctrl->ctrl.device, "Mandatory keyed sgls are not supported!\n"); goto destroy_admin; } if (ctrl->ctrl.opts->queue_size > ctrl->ctrl.sqsize + 1) { dev_warn(ctrl->ctrl.device, "queue_size %zu > ctrl sqsize %u, clamping down\n", ctrl->ctrl.opts->queue_size, ctrl->ctrl.sqsize + 1); } if (ctrl->ctrl.sqsize + 1 > NVME_RDMA_MAX_QUEUE_SIZE) { dev_warn(ctrl->ctrl.device, "ctrl sqsize %u > max queue size %u, clamping down\n", ctrl->ctrl.sqsize + 1, NVME_RDMA_MAX_QUEUE_SIZE); ctrl->ctrl.sqsize = NVME_RDMA_MAX_QUEUE_SIZE - 1; } if (ctrl->ctrl.sqsize + 1 > ctrl->ctrl.maxcmd) { dev_warn(ctrl->ctrl.device, "sqsize %u > ctrl maxcmd %u, clamping down\n", ctrl->ctrl.sqsize + 1, ctrl->ctrl.maxcmd); ctrl->ctrl.sqsize = ctrl->ctrl.maxcmd - 1; } if (ctrl->ctrl.sgls & (1 << 20)) ctrl->use_inline_data = true; if (ctrl->ctrl.queue_count > 1) { ret = nvme_rdma_configure_io_queues(ctrl, new); if (ret) goto destroy_admin; } changed = 
nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_LIVE); if (!changed) { /* * state change failure is ok if we started ctrl delete, * unless we're during creation of a new controller to * avoid races with teardown flow. */ WARN_ON_ONCE(ctrl->ctrl.state != NVME_CTRL_DELETING && ctrl->ctrl.state != NVME_CTRL_DELETING_NOIO); WARN_ON_ONCE(new); ret = -EINVAL; goto destroy_io; } nvme_start_ctrl(&ctrl->ctrl); return 0; destroy_io: if (ctrl->ctrl.queue_count > 1) { nvme_quiesce_io_queues(&ctrl->ctrl); nvme_sync_io_queues(&ctrl->ctrl); nvme_rdma_stop_io_queues(ctrl); nvme_cancel_tagset(&ctrl->ctrl); if (new) nvme_remove_io_tag_set(&ctrl->ctrl); nvme_rdma_free_io_queues(ctrl); } destroy_admin: nvme_quiesce_admin_queue(&ctrl->ctrl); blk_sync_queue(ctrl->ctrl.admin_q); nvme_rdma_stop_queue(&ctrl->queues[0]); nvme_cancel_admin_tagset(&ctrl->ctrl); if (new) nvme_remove_admin_tag_set(&ctrl->ctrl); nvme_rdma_destroy_admin_queue(ctrl); return ret; } static void nvme_rdma_reconnect_ctrl_work(struct work_struct *work) { struct nvme_rdma_ctrl *ctrl = container_of(to_delayed_work(work), struct nvme_rdma_ctrl, reconnect_work); ++ctrl->ctrl.nr_reconnects; if (nvme_rdma_setup_ctrl(ctrl, false)) goto requeue; dev_info(ctrl->ctrl.device, "Successfully reconnected (%d attempts)\n", ctrl->ctrl.nr_reconnects); ctrl->ctrl.nr_reconnects = 0; return; requeue: dev_info(ctrl->ctrl.device, "Failed reconnect attempt %d\n", ctrl->ctrl.nr_reconnects); nvme_rdma_reconnect_or_remove(ctrl); } static void nvme_rdma_error_recovery_work(struct work_struct *work) { struct nvme_rdma_ctrl *ctrl = container_of(work, struct nvme_rdma_ctrl, err_work); nvme_stop_keep_alive(&ctrl->ctrl); flush_work(&ctrl->ctrl.async_event_work); nvme_rdma_teardown_io_queues(ctrl, false); nvme_unquiesce_io_queues(&ctrl->ctrl); nvme_rdma_teardown_admin_queue(ctrl, false); nvme_unquiesce_admin_queue(&ctrl->ctrl); nvme_auth_stop(&ctrl->ctrl); if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_CONNECTING)) { /* state change failure is ok if we started ctrl delete */ WARN_ON_ONCE(ctrl->ctrl.state != NVME_CTRL_DELETING && ctrl->ctrl.state != NVME_CTRL_DELETING_NOIO); return; } nvme_rdma_reconnect_or_remove(ctrl); } static void nvme_rdma_error_recovery(struct nvme_rdma_ctrl *ctrl) { if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_RESETTING)) return; dev_warn(ctrl->ctrl.device, "starting error recovery\n"); queue_work(nvme_reset_wq, &ctrl->err_work); } static void nvme_rdma_end_request(struct nvme_rdma_request *req) { struct request *rq = blk_mq_rq_from_pdu(req); if (!refcount_dec_and_test(&req->ref)) return; if (!nvme_try_complete_req(rq, req->status, req->result)) nvme_rdma_complete_rq(rq); } static void nvme_rdma_wr_error(struct ib_cq *cq, struct ib_wc *wc, const char *op) { struct nvme_rdma_queue *queue = wc->qp->qp_context; struct nvme_rdma_ctrl *ctrl = queue->ctrl; if (ctrl->ctrl.state == NVME_CTRL_LIVE) dev_info(ctrl->ctrl.device, "%s for CQE 0x%p failed with status %s (%d)\n", op, wc->wr_cqe, ib_wc_status_msg(wc->status), wc->status); nvme_rdma_error_recovery(ctrl); } static void nvme_rdma_memreg_done(struct ib_cq *cq, struct ib_wc *wc) { if (unlikely(wc->status != IB_WC_SUCCESS)) nvme_rdma_wr_error(cq, wc, "MEMREG"); } static void nvme_rdma_inv_rkey_done(struct ib_cq *cq, struct ib_wc *wc) { struct nvme_rdma_request *req = container_of(wc->wr_cqe, struct nvme_rdma_request, reg_cqe); if (unlikely(wc->status != IB_WC_SUCCESS)) nvme_rdma_wr_error(cq, wc, "LOCAL_INV"); else nvme_rdma_end_request(req); } static int nvme_rdma_inv_rkey(struct nvme_rdma_queue 
*queue, struct nvme_rdma_request *req) { struct ib_send_wr wr = { .opcode = IB_WR_LOCAL_INV, .next = NULL, .num_sge = 0, .send_flags = IB_SEND_SIGNALED, .ex.invalidate_rkey = req->mr->rkey, }; req->reg_cqe.done = nvme_rdma_inv_rkey_done; wr.wr_cqe = &req->reg_cqe; return ib_post_send(queue->qp, &wr, NULL); } static void nvme_rdma_dma_unmap_req(struct ib_device *ibdev, struct request *rq) { struct nvme_rdma_request *req = blk_mq_rq_to_pdu(rq); if (blk_integrity_rq(rq)) { ib_dma_unmap_sg(ibdev, req->metadata_sgl->sg_table.sgl, req->metadata_sgl->nents, rq_dma_dir(rq)); sg_free_table_chained(&req->metadata_sgl->sg_table, NVME_INLINE_METADATA_SG_CNT); } ib_dma_unmap_sg(ibdev, req->data_sgl.sg_table.sgl, req->data_sgl.nents, rq_dma_dir(rq)); sg_free_table_chained(&req->data_sgl.sg_table, NVME_INLINE_SG_CNT); } static void nvme_rdma_unmap_data(struct nvme_rdma_queue *queue, struct request *rq) { struct nvme_rdma_request *req = blk_mq_rq_to_pdu(rq); struct nvme_rdma_device *dev = queue->device; struct ib_device *ibdev = dev->dev; struct list_head *pool = &queue->qp->rdma_mrs; if (!blk_rq_nr_phys_segments(rq)) return; if (req->use_sig_mr) pool = &queue->qp->sig_mrs; if (req->mr) { ib_mr_pool_put(queue->qp, pool, req->mr); req->mr = NULL; } nvme_rdma_dma_unmap_req(ibdev, rq); } static int nvme_rdma_set_sg_null(struct nvme_command *c) { struct nvme_keyed_sgl_desc *sg = &c->common.dptr.ksgl; sg->addr = 0; put_unaligned_le24(0, sg->length); put_unaligned_le32(0, sg->key); sg->type = NVME_KEY_SGL_FMT_DATA_DESC << 4; return 0; } static int nvme_rdma_map_sg_inline(struct nvme_rdma_queue *queue, struct nvme_rdma_request *req, struct nvme_command *c, int count) { struct nvme_sgl_desc *sg = &c->common.dptr.sgl; struct ib_sge *sge = &req->sge[1]; struct scatterlist *sgl; u32 len = 0; int i; for_each_sg(req->data_sgl.sg_table.sgl, sgl, count, i) { sge->addr = sg_dma_address(sgl); sge->length = sg_dma_len(sgl); sge->lkey = queue->device->pd->local_dma_lkey; len += sge->length; sge++; } sg->addr = cpu_to_le64(queue->ctrl->ctrl.icdoff); sg->length = cpu_to_le32(len); sg->type = (NVME_SGL_FMT_DATA_DESC << 4) | NVME_SGL_FMT_OFFSET; req->num_sge += count; return 0; } static int nvme_rdma_map_sg_single(struct nvme_rdma_queue *queue, struct nvme_rdma_request *req, struct nvme_command *c) { struct nvme_keyed_sgl_desc *sg = &c->common.dptr.ksgl; sg->addr = cpu_to_le64(sg_dma_address(req->data_sgl.sg_table.sgl)); put_unaligned_le24(sg_dma_len(req->data_sgl.sg_table.sgl), sg->length); put_unaligned_le32(queue->device->pd->unsafe_global_rkey, sg->key); sg->type = NVME_KEY_SGL_FMT_DATA_DESC << 4; return 0; } static int nvme_rdma_map_sg_fr(struct nvme_rdma_queue *queue, struct nvme_rdma_request *req, struct nvme_command *c, int count) { struct nvme_keyed_sgl_desc *sg = &c->common.dptr.ksgl; int nr; req->mr = ib_mr_pool_get(queue->qp, &queue->qp->rdma_mrs); if (WARN_ON_ONCE(!req->mr)) return -EAGAIN; /* * Align the MR to a 4K page size to match the ctrl page size and * the block virtual boundary. 
*/ nr = ib_map_mr_sg(req->mr, req->data_sgl.sg_table.sgl, count, NULL, SZ_4K); if (unlikely(nr < count)) { ib_mr_pool_put(queue->qp, &queue->qp->rdma_mrs, req->mr); req->mr = NULL; if (nr < 0) return nr; return -EINVAL; } ib_update_fast_reg_key(req->mr, ib_inc_rkey(req->mr->rkey)); req->reg_cqe.done = nvme_rdma_memreg_done; memset(&req->reg_wr, 0, sizeof(req->reg_wr)); req->reg_wr.wr.opcode = IB_WR_REG_MR; req->reg_wr.wr.wr_cqe = &req->reg_cqe; req->reg_wr.wr.num_sge = 0; req->reg_wr.mr = req->mr; req->reg_wr.key = req->mr->rkey; req->reg_wr.access = IB_ACCESS_LOCAL_WRITE | IB_ACCESS_REMOTE_READ | IB_ACCESS_REMOTE_WRITE; sg->addr = cpu_to_le64(req->mr->iova); put_unaligned_le24(req->mr->length, sg->length); put_unaligned_le32(req->mr->rkey, sg->key); sg->type = (NVME_KEY_SGL_FMT_DATA_DESC << 4) | NVME_SGL_FMT_INVALIDATE; return 0; } static void nvme_rdma_set_sig_domain(struct blk_integrity *bi, struct nvme_command *cmd, struct ib_sig_domain *domain, u16 control, u8 pi_type) { domain->sig_type = IB_SIG_TYPE_T10_DIF; domain->sig.dif.bg_type = IB_T10DIF_CRC; domain->sig.dif.pi_interval = 1 << bi->interval_exp; domain->sig.dif.ref_tag = le32_to_cpu(cmd->rw.reftag); if (control & NVME_RW_PRINFO_PRCHK_REF) domain->sig.dif.ref_remap = true; domain->sig.dif.app_tag = le16_to_cpu(cmd->rw.apptag); domain->sig.dif.apptag_check_mask = le16_to_cpu(cmd->rw.appmask); domain->sig.dif.app_escape = true; if (pi_type == NVME_NS_DPS_PI_TYPE3) domain->sig.dif.ref_escape = true; } static void nvme_rdma_set_sig_attrs(struct blk_integrity *bi, struct nvme_command *cmd, struct ib_sig_attrs *sig_attrs, u8 pi_type) { u16 control = le16_to_cpu(cmd->rw.control); memset(sig_attrs, 0, sizeof(*sig_attrs)); if (control & NVME_RW_PRINFO_PRACT) { /* for WRITE_INSERT/READ_STRIP no memory domain */ sig_attrs->mem.sig_type = IB_SIG_TYPE_NONE; nvme_rdma_set_sig_domain(bi, cmd, &sig_attrs->wire, control, pi_type); /* Clear the PRACT bit since HCA will generate/verify the PI */ control &= ~NVME_RW_PRINFO_PRACT; cmd->rw.control = cpu_to_le16(control); } else { /* for WRITE_PASS/READ_PASS both wire/memory domains exist */ nvme_rdma_set_sig_domain(bi, cmd, &sig_attrs->wire, control, pi_type); nvme_rdma_set_sig_domain(bi, cmd, &sig_attrs->mem, control, pi_type); } } static void nvme_rdma_set_prot_checks(struct nvme_command *cmd, u8 *mask) { *mask = 0; if (le16_to_cpu(cmd->rw.control) & NVME_RW_PRINFO_PRCHK_REF) *mask |= IB_SIG_CHECK_REFTAG; if (le16_to_cpu(cmd->rw.control) & NVME_RW_PRINFO_PRCHK_GUARD) *mask |= IB_SIG_CHECK_GUARD; } static void nvme_rdma_sig_done(struct ib_cq *cq, struct ib_wc *wc) { if (unlikely(wc->status != IB_WC_SUCCESS)) nvme_rdma_wr_error(cq, wc, "SIG"); } static int nvme_rdma_map_sg_pi(struct nvme_rdma_queue *queue, struct nvme_rdma_request *req, struct nvme_command *c, int count, int pi_count) { struct nvme_rdma_sgl *sgl = &req->data_sgl; struct ib_reg_wr *wr = &req->reg_wr; struct request *rq = blk_mq_rq_from_pdu(req); struct nvme_ns *ns = rq->q->queuedata; struct bio *bio = rq->bio; struct nvme_keyed_sgl_desc *sg = &c->common.dptr.ksgl; int nr; req->mr = ib_mr_pool_get(queue->qp, &queue->qp->sig_mrs); if (WARN_ON_ONCE(!req->mr)) return -EAGAIN; nr = ib_map_mr_sg_pi(req->mr, sgl->sg_table.sgl, count, NULL, req->metadata_sgl->sg_table.sgl, pi_count, NULL, SZ_4K); if (unlikely(nr)) goto mr_put; nvme_rdma_set_sig_attrs(blk_get_integrity(bio->bi_bdev->bd_disk), c, req->mr->sig_attrs, ns->pi_type); nvme_rdma_set_prot_checks(c, &req->mr->sig_attrs->check_mask); ib_update_fast_reg_key(req->mr, 
ib_inc_rkey(req->mr->rkey)); req->reg_cqe.done = nvme_rdma_sig_done; memset(wr, 0, sizeof(*wr)); wr->wr.opcode = IB_WR_REG_MR_INTEGRITY; wr->wr.wr_cqe = &req->reg_cqe; wr->wr.num_sge = 0; wr->wr.send_flags = 0; wr->mr = req->mr; wr->key = req->mr->rkey; wr->access = IB_ACCESS_LOCAL_WRITE | IB_ACCESS_REMOTE_READ | IB_ACCESS_REMOTE_WRITE; sg->addr = cpu_to_le64(req->mr->iova); put_unaligned_le24(req->mr->length, sg->length); put_unaligned_le32(req->mr->rkey, sg->key); sg->type = NVME_KEY_SGL_FMT_DATA_DESC << 4; return 0; mr_put: ib_mr_pool_put(queue->qp, &queue->qp->sig_mrs, req->mr); req->mr = NULL; if (nr < 0) return nr; return -EINVAL; } static int nvme_rdma_dma_map_req(struct ib_device *ibdev, struct request *rq, int *count, int *pi_count) { struct nvme_rdma_request *req = blk_mq_rq_to_pdu(rq); int ret; req->data_sgl.sg_table.sgl = (struct scatterlist *)(req + 1); ret = sg_alloc_table_chained(&req->data_sgl.sg_table, blk_rq_nr_phys_segments(rq), req->data_sgl.sg_table.sgl, NVME_INLINE_SG_CNT); if (ret) return -ENOMEM; req->data_sgl.nents = blk_rq_map_sg(rq->q, rq, req->data_sgl.sg_table.sgl); *count = ib_dma_map_sg(ibdev, req->data_sgl.sg_table.sgl, req->data_sgl.nents, rq_dma_dir(rq)); if (unlikely(*count <= 0)) { ret = -EIO; goto out_free_table; } if (blk_integrity_rq(rq)) { req->metadata_sgl->sg_table.sgl = (struct scatterlist *)(req->metadata_sgl + 1); ret = sg_alloc_table_chained(&req->metadata_sgl->sg_table, blk_rq_count_integrity_sg(rq->q, rq->bio), req->metadata_sgl->sg_table.sgl, NVME_INLINE_METADATA_SG_CNT); if (unlikely(ret)) { ret = -ENOMEM; goto out_unmap_sg; } req->metadata_sgl->nents = blk_rq_map_integrity_sg(rq->q, rq->bio, req->metadata_sgl->sg_table.sgl); *pi_count = ib_dma_map_sg(ibdev, req->metadata_sgl->sg_table.sgl, req->metadata_sgl->nents, rq_dma_dir(rq)); if (unlikely(*pi_count <= 0)) { ret = -EIO; goto out_free_pi_table; } } return 0; out_free_pi_table: sg_free_table_chained(&req->metadata_sgl->sg_table, NVME_INLINE_METADATA_SG_CNT); out_unmap_sg: ib_dma_unmap_sg(ibdev, req->data_sgl.sg_table.sgl, req->data_sgl.nents, rq_dma_dir(rq)); out_free_table: sg_free_table_chained(&req->data_sgl.sg_table, NVME_INLINE_SG_CNT); return ret; } static int nvme_rdma_map_data(struct nvme_rdma_queue *queue, struct request *rq, struct nvme_command *c) { struct nvme_rdma_request *req = blk_mq_rq_to_pdu(rq); struct nvme_rdma_device *dev = queue->device; struct ib_device *ibdev = dev->dev; int pi_count = 0; int count, ret; req->num_sge = 1; refcount_set(&req->ref, 2); /* send and recv completions */ c->common.flags |= NVME_CMD_SGL_METABUF; if (!blk_rq_nr_phys_segments(rq)) return nvme_rdma_set_sg_null(c); ret = nvme_rdma_dma_map_req(ibdev, rq, &count, &pi_count); if (unlikely(ret)) return ret; if (req->use_sig_mr) { ret = nvme_rdma_map_sg_pi(queue, req, c, count, pi_count); goto out; } if (count <= dev->num_inline_segments) { if (rq_data_dir(rq) == WRITE && nvme_rdma_queue_idx(queue) && queue->ctrl->use_inline_data && blk_rq_payload_bytes(rq) <= nvme_rdma_inline_data_size(queue)) { ret = nvme_rdma_map_sg_inline(queue, req, c, count); goto out; } if (count == 1 && dev->pd->flags & IB_PD_UNSAFE_GLOBAL_RKEY) { ret = nvme_rdma_map_sg_single(queue, req, c); goto out; } } ret = nvme_rdma_map_sg_fr(queue, req, c, count); out: if (unlikely(ret)) goto out_dma_unmap_req; return 0; out_dma_unmap_req: nvme_rdma_dma_unmap_req(ibdev, rq); return ret; } static void nvme_rdma_send_done(struct ib_cq *cq, struct ib_wc *wc) { struct nvme_rdma_qe *qe = container_of(wc->wr_cqe, struct nvme_rdma_qe, 
cqe); struct nvme_rdma_request *req = container_of(qe, struct nvme_rdma_request, sqe); if (unlikely(wc->status != IB_WC_SUCCESS)) nvme_rdma_wr_error(cq, wc, "SEND"); else nvme_rdma_end_request(req); } static int nvme_rdma_post_send(struct nvme_rdma_queue *queue, struct nvme_rdma_qe *qe, struct ib_sge *sge, u32 num_sge, struct ib_send_wr *first) { struct ib_send_wr wr; int ret; sge->addr = qe->dma; sge->length = sizeof(struct nvme_command); sge->lkey = queue->device->pd->local_dma_lkey; wr.next = NULL; wr.wr_cqe = &qe->cqe; wr.sg_list = sge; wr.num_sge = num_sge; wr.opcode = IB_WR_SEND; wr.send_flags = IB_SEND_SIGNALED; if (first) first->next = &wr; else first = &wr; ret = ib_post_send(queue->qp, first, NULL); if (unlikely(ret)) { dev_err(queue->ctrl->ctrl.device, "%s failed with error code %d\n", __func__, ret); } return ret; } static int nvme_rdma_post_recv(struct nvme_rdma_queue *queue, struct nvme_rdma_qe *qe) { struct ib_recv_wr wr; struct ib_sge list; int ret; list.addr = qe->dma; list.length = sizeof(struct nvme_completion); list.lkey = queue->device->pd->local_dma_lkey; qe->cqe.done = nvme_rdma_recv_done; wr.next = NULL; wr.wr_cqe = &qe->cqe; wr.sg_list = &list; wr.num_sge = 1; ret = ib_post_recv(queue->qp, &wr, NULL); if (unlikely(ret)) { dev_err(queue->ctrl->ctrl.device, "%s failed with error code %d\n", __func__, ret); } return ret; } static struct blk_mq_tags *nvme_rdma_tagset(struct nvme_rdma_queue *queue) { u32 queue_idx = nvme_rdma_queue_idx(queue); if (queue_idx == 0) return queue->ctrl->admin_tag_set.tags[queue_idx]; return queue->ctrl->tag_set.tags[queue_idx - 1]; } static void nvme_rdma_async_done(struct ib_cq *cq, struct ib_wc *wc) { if (unlikely(wc->status != IB_WC_SUCCESS)) nvme_rdma_wr_error(cq, wc, "ASYNC"); } static void nvme_rdma_submit_async_event(struct nvme_ctrl *arg) { struct nvme_rdma_ctrl *ctrl = to_rdma_ctrl(arg); struct nvme_rdma_queue *queue = &ctrl->queues[0]; struct ib_device *dev = queue->device->dev; struct nvme_rdma_qe *sqe = &ctrl->async_event_sqe; struct nvme_command *cmd = sqe->data; struct ib_sge sge; int ret; ib_dma_sync_single_for_cpu(dev, sqe->dma, sizeof(*cmd), DMA_TO_DEVICE); memset(cmd, 0, sizeof(*cmd)); cmd->common.opcode = nvme_admin_async_event; cmd->common.command_id = NVME_AQ_BLK_MQ_DEPTH; cmd->common.flags |= NVME_CMD_SGL_METABUF; nvme_rdma_set_sg_null(cmd); sqe->cqe.done = nvme_rdma_async_done; ib_dma_sync_single_for_device(dev, sqe->dma, sizeof(*cmd), DMA_TO_DEVICE); ret = nvme_rdma_post_send(queue, sqe, &sge, 1, NULL); WARN_ON_ONCE(ret); } static void nvme_rdma_process_nvme_rsp(struct nvme_rdma_queue *queue, struct nvme_completion *cqe, struct ib_wc *wc) { struct request *rq; struct nvme_rdma_request *req; rq = nvme_find_rq(nvme_rdma_tagset(queue), cqe->command_id); if (!rq) { dev_err(queue->ctrl->ctrl.device, "got bad command_id %#x on QP %#x\n", cqe->command_id, queue->qp->qp_num); nvme_rdma_error_recovery(queue->ctrl); return; } req = blk_mq_rq_to_pdu(rq); req->status = cqe->status; req->result = cqe->result; if (wc->wc_flags & IB_WC_WITH_INVALIDATE) { if (unlikely(!req->mr || wc->ex.invalidate_rkey != req->mr->rkey)) { dev_err(queue->ctrl->ctrl.device, "Bogus remote invalidation for rkey %#x\n", req->mr ? 
req->mr->rkey : 0); nvme_rdma_error_recovery(queue->ctrl); } } else if (req->mr) { int ret; ret = nvme_rdma_inv_rkey(queue, req); if (unlikely(ret < 0)) { dev_err(queue->ctrl->ctrl.device, "Queueing INV WR for rkey %#x failed (%d)\n", req->mr->rkey, ret); nvme_rdma_error_recovery(queue->ctrl); } /* the local invalidation completion will end the request */ return; } nvme_rdma_end_request(req); } static void nvme_rdma_recv_done(struct ib_cq *cq, struct ib_wc *wc) { struct nvme_rdma_qe *qe = container_of(wc->wr_cqe, struct nvme_rdma_qe, cqe); struct nvme_rdma_queue *queue = wc->qp->qp_context; struct ib_device *ibdev = queue->device->dev; struct nvme_completion *cqe = qe->data; const size_t len = sizeof(struct nvme_completion); if (unlikely(wc->status != IB_WC_SUCCESS)) { nvme_rdma_wr_error(cq, wc, "RECV"); return; } /* sanity checking for received data length */ if (unlikely(wc->byte_len < len)) { dev_err(queue->ctrl->ctrl.device, "Unexpected nvme completion length(%d)\n", wc->byte_len); nvme_rdma_error_recovery(queue->ctrl); return; } ib_dma_sync_single_for_cpu(ibdev, qe->dma, len, DMA_FROM_DEVICE); /* * AEN requests are special as they don't time out and can * survive any kind of queue freeze and often don't respond to * aborts. We don't even bother to allocate a struct request * for them but rather special case them here. */ if (unlikely(nvme_is_aen_req(nvme_rdma_queue_idx(queue), cqe->command_id))) nvme_complete_async_event(&queue->ctrl->ctrl, cqe->status, &cqe->result); else nvme_rdma_process_nvme_rsp(queue, cqe, wc); ib_dma_sync_single_for_device(ibdev, qe->dma, len, DMA_FROM_DEVICE); nvme_rdma_post_recv(queue, qe); } static int nvme_rdma_conn_established(struct nvme_rdma_queue *queue) { int ret, i; for (i = 0; i < queue->queue_size; i++) { ret = nvme_rdma_post_recv(queue, &queue->rsp_ring[i]); if (ret) return ret; } return 0; } static int nvme_rdma_conn_rejected(struct nvme_rdma_queue *queue, struct rdma_cm_event *ev) { struct rdma_cm_id *cm_id = queue->cm_id; int status = ev->status; const char *rej_msg; const struct nvme_rdma_cm_rej *rej_data; u8 rej_data_len; rej_msg = rdma_reject_msg(cm_id, status); rej_data = rdma_consumer_reject_data(cm_id, ev, &rej_data_len); if (rej_data && rej_data_len >= sizeof(u16)) { u16 sts = le16_to_cpu(rej_data->sts); dev_err(queue->ctrl->ctrl.device, "Connect rejected: status %d (%s) nvme status %d (%s).\n", status, rej_msg, sts, nvme_rdma_cm_msg(sts)); } else { dev_err(queue->ctrl->ctrl.device, "Connect rejected: status %d (%s).\n", status, rej_msg); } return -ECONNRESET; } static int nvme_rdma_addr_resolved(struct nvme_rdma_queue *queue) { struct nvme_ctrl *ctrl = &queue->ctrl->ctrl; int ret; ret = nvme_rdma_create_queue_ib(queue); if (ret) return ret; if (ctrl->opts->tos >= 0) rdma_set_service_type(queue->cm_id, ctrl->opts->tos); ret = rdma_resolve_route(queue->cm_id, NVME_RDMA_CM_TIMEOUT_MS); if (ret) { dev_err(ctrl->device, "rdma_resolve_route failed (%d).\n", queue->cm_error); goto out_destroy_queue; } return 0; out_destroy_queue: nvme_rdma_destroy_queue_ib(queue); return ret; } static int nvme_rdma_route_resolved(struct nvme_rdma_queue *queue) { struct nvme_rdma_ctrl *ctrl = queue->ctrl; struct rdma_conn_param param = { }; struct nvme_rdma_cm_req priv = { }; int ret; param.qp_num = queue->qp->qp_num; param.flow_control = 1; param.responder_resources = queue->device->dev->attrs.max_qp_rd_atom; /* maximum retry count */ param.retry_count = 7; param.rnr_retry_count = 7; param.private_data = &priv; param.private_data_len = sizeof(priv); priv.recfmt 
= cpu_to_le16(NVME_RDMA_CM_FMT_1_0); priv.qid = cpu_to_le16(nvme_rdma_queue_idx(queue)); /* * set the admin queue depth to the minimum size * specified by the Fabrics standard. */ if (priv.qid == 0) { priv.hrqsize = cpu_to_le16(NVME_AQ_DEPTH); priv.hsqsize = cpu_to_le16(NVME_AQ_DEPTH - 1); } else { /* * current interpretation of the fabrics spec * is at minimum you make hrqsize sqsize+1, or a * 1's based representation of sqsize. */ priv.hrqsize = cpu_to_le16(queue->queue_size); priv.hsqsize = cpu_to_le16(queue->ctrl->ctrl.sqsize); } ret = rdma_connect_locked(queue->cm_id, &param); if (ret) { dev_err(ctrl->ctrl.device, "rdma_connect_locked failed (%d).\n", ret); return ret; } return 0; } static int nvme_rdma_cm_handler(struct rdma_cm_id *cm_id, struct rdma_cm_event *ev) { struct nvme_rdma_queue *queue = cm_id->context; int cm_error = 0; dev_dbg(queue->ctrl->ctrl.device, "%s (%d): status %d id %p\n", rdma_event_msg(ev->event), ev->event, ev->status, cm_id); switch (ev->event) { case RDMA_CM_EVENT_ADDR_RESOLVED: cm_error = nvme_rdma_addr_resolved(queue); break; case RDMA_CM_EVENT_ROUTE_RESOLVED: cm_error = nvme_rdma_route_resolved(queue); break; case RDMA_CM_EVENT_ESTABLISHED: queue->cm_error = nvme_rdma_conn_established(queue); /* complete cm_done regardless of success/failure */ complete(&queue->cm_done); return 0; case RDMA_CM_EVENT_REJECTED: cm_error = nvme_rdma_conn_rejected(queue, ev); break; case RDMA_CM_EVENT_ROUTE_ERROR: case RDMA_CM_EVENT_CONNECT_ERROR: case RDMA_CM_EVENT_UNREACHABLE: case RDMA_CM_EVENT_ADDR_ERROR: dev_dbg(queue->ctrl->ctrl.device, "CM error event %d\n", ev->event); cm_error = -ECONNRESET; break; case RDMA_CM_EVENT_DISCONNECTED: case RDMA_CM_EVENT_ADDR_CHANGE: case RDMA_CM_EVENT_TIMEWAIT_EXIT: dev_dbg(queue->ctrl->ctrl.device, "disconnect received - connection closed\n"); nvme_rdma_error_recovery(queue->ctrl); break; case RDMA_CM_EVENT_DEVICE_REMOVAL: /* device removal is handled via the ib_client API */ break; default: dev_err(queue->ctrl->ctrl.device, "Unexpected RDMA CM event (%d)\n", ev->event); nvme_rdma_error_recovery(queue->ctrl); break; } if (cm_error) { queue->cm_error = cm_error; complete(&queue->cm_done); } return 0; } static void nvme_rdma_complete_timed_out(struct request *rq) { struct nvme_rdma_request *req = blk_mq_rq_to_pdu(rq); struct nvme_rdma_queue *queue = req->queue; nvme_rdma_stop_queue(queue); nvmf_complete_timed_out_request(rq); } static enum blk_eh_timer_return nvme_rdma_timeout(struct request *rq) { struct nvme_rdma_request *req = blk_mq_rq_to_pdu(rq); struct nvme_rdma_queue *queue = req->queue; struct nvme_rdma_ctrl *ctrl = queue->ctrl; dev_warn(ctrl->ctrl.device, "I/O %d QID %d timeout\n", rq->tag, nvme_rdma_queue_idx(queue)); if (ctrl->ctrl.state != NVME_CTRL_LIVE) { /* * If we are resetting, connecting or deleting we should * complete immediately because we may block controller * teardown or setup sequence * - ctrl disable/shutdown fabrics requests * - connect requests * - initialization admin requests * - I/O requests that entered after unquiescing and * the controller stopped responding * * All other requests should be cancelled by the error * recovery work, so it's fine that we fail it here. */ nvme_rdma_complete_timed_out(rq); return BLK_EH_DONE; } /* * LIVE state should trigger the normal error recovery which will * handle completing this request. 
*/ nvme_rdma_error_recovery(ctrl); return BLK_EH_RESET_TIMER; } static blk_status_t nvme_rdma_queue_rq(struct blk_mq_hw_ctx *hctx, const struct blk_mq_queue_data *bd) { struct nvme_ns *ns = hctx->queue->queuedata; struct nvme_rdma_queue *queue = hctx->driver_data; struct request *rq = bd->rq; struct nvme_rdma_request *req = blk_mq_rq_to_pdu(rq); struct nvme_rdma_qe *sqe = &req->sqe; struct nvme_command *c = nvme_req(rq)->cmd; struct ib_device *dev; bool queue_ready = test_bit(NVME_RDMA_Q_LIVE, &queue->flags); blk_status_t ret; int err; WARN_ON_ONCE(rq->tag < 0); if (!nvme_check_ready(&queue->ctrl->ctrl, rq, queue_ready)) return nvme_fail_nonready_command(&queue->ctrl->ctrl, rq); dev = queue->device->dev; req->sqe.dma = ib_dma_map_single(dev, req->sqe.data, sizeof(struct nvme_command), DMA_TO_DEVICE); err = ib_dma_mapping_error(dev, req->sqe.dma); if (unlikely(err)) return BLK_STS_RESOURCE; ib_dma_sync_single_for_cpu(dev, sqe->dma, sizeof(struct nvme_command), DMA_TO_DEVICE); ret = nvme_setup_cmd(ns, rq); if (ret) goto unmap_qe; nvme_start_request(rq); if (IS_ENABLED(CONFIG_BLK_DEV_INTEGRITY) && queue->pi_support && (c->common.opcode == nvme_cmd_write || c->common.opcode == nvme_cmd_read) && nvme_ns_has_pi(ns)) req->use_sig_mr = true; else req->use_sig_mr = false; err = nvme_rdma_map_data(queue, rq, c); if (unlikely(err < 0)) { dev_err(queue->ctrl->ctrl.device, "Failed to map data (%d)\n", err); goto err; } sqe->cqe.done = nvme_rdma_send_done; ib_dma_sync_single_for_device(dev, sqe->dma, sizeof(struct nvme_command), DMA_TO_DEVICE); err = nvme_rdma_post_send(queue, sqe, req->sge, req->num_sge, req->mr ? &req->reg_wr.wr : NULL); if (unlikely(err)) goto err_unmap; return BLK_STS_OK; err_unmap: nvme_rdma_unmap_data(queue, rq); err: if (err == -EIO) ret = nvme_host_path_error(rq); else if (err == -ENOMEM || err == -EAGAIN) ret = BLK_STS_RESOURCE; else ret = BLK_STS_IOERR; nvme_cleanup_cmd(rq); unmap_qe: ib_dma_unmap_single(dev, req->sqe.dma, sizeof(struct nvme_command), DMA_TO_DEVICE); return ret; } static int nvme_rdma_poll(struct blk_mq_hw_ctx *hctx, struct io_comp_batch *iob) { struct nvme_rdma_queue *queue = hctx->driver_data; return ib_process_cq_direct(queue->ib_cq, -1); } static void nvme_rdma_check_pi_status(struct nvme_rdma_request *req) { struct request *rq = blk_mq_rq_from_pdu(req); struct ib_mr_status mr_status; int ret; ret = ib_check_mr_status(req->mr, IB_MR_CHECK_SIG_STATUS, &mr_status); if (ret) { pr_err("ib_check_mr_status failed, ret %d\n", ret); nvme_req(rq)->status = NVME_SC_INVALID_PI; return; } if (mr_status.fail_status & IB_MR_CHECK_SIG_STATUS) { switch (mr_status.sig_err.err_type) { case IB_SIG_BAD_GUARD: nvme_req(rq)->status = NVME_SC_GUARD_CHECK; break; case IB_SIG_BAD_REFTAG: nvme_req(rq)->status = NVME_SC_REFTAG_CHECK; break; case IB_SIG_BAD_APPTAG: nvme_req(rq)->status = NVME_SC_APPTAG_CHECK; break; } pr_err("PI error found type %d expected 0x%x vs actual 0x%x\n", mr_status.sig_err.err_type, mr_status.sig_err.expected, mr_status.sig_err.actual); } } static void nvme_rdma_complete_rq(struct request *rq) { struct nvme_rdma_request *req = blk_mq_rq_to_pdu(rq); struct nvme_rdma_queue *queue = req->queue; struct ib_device *ibdev = queue->device->dev; if (req->use_sig_mr) nvme_rdma_check_pi_status(req); nvme_rdma_unmap_data(queue, rq); ib_dma_unmap_single(ibdev, req->sqe.dma, sizeof(struct nvme_command), DMA_TO_DEVICE); nvme_complete_rq(rq); } static void nvme_rdma_map_queues(struct blk_mq_tag_set *set) { struct nvme_rdma_ctrl *ctrl = to_rdma_ctrl(set->driver_data); 
nvmf_map_queues(set, &ctrl->ctrl, ctrl->io_queues); } static const struct blk_mq_ops nvme_rdma_mq_ops = { .queue_rq = nvme_rdma_queue_rq, .complete = nvme_rdma_complete_rq, .init_request = nvme_rdma_init_request, .exit_request = nvme_rdma_exit_request, .init_hctx = nvme_rdma_init_hctx, .timeout = nvme_rdma_timeout, .map_queues = nvme_rdma_map_queues, .poll = nvme_rdma_poll, }; static const struct blk_mq_ops nvme_rdma_admin_mq_ops = { .queue_rq = nvme_rdma_queue_rq, .complete = nvme_rdma_complete_rq, .init_request = nvme_rdma_init_request, .exit_request = nvme_rdma_exit_request, .init_hctx = nvme_rdma_init_admin_hctx, .timeout = nvme_rdma_timeout, }; static void nvme_rdma_shutdown_ctrl(struct nvme_rdma_ctrl *ctrl, bool shutdown) { nvme_rdma_teardown_io_queues(ctrl, shutdown); nvme_quiesce_admin_queue(&ctrl->ctrl); nvme_disable_ctrl(&ctrl->ctrl, shutdown); nvme_rdma_teardown_admin_queue(ctrl, shutdown); } static void nvme_rdma_delete_ctrl(struct nvme_ctrl *ctrl) { nvme_rdma_shutdown_ctrl(to_rdma_ctrl(ctrl), true); } static void nvme_rdma_reset_ctrl_work(struct work_struct *work) { struct nvme_rdma_ctrl *ctrl = container_of(work, struct nvme_rdma_ctrl, ctrl.reset_work); nvme_stop_ctrl(&ctrl->ctrl); nvme_rdma_shutdown_ctrl(ctrl, false); if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_CONNECTING)) { /* state change failure should never happen */ WARN_ON_ONCE(1); return; } if (nvme_rdma_setup_ctrl(ctrl, false)) goto out_fail; return; out_fail: ++ctrl->ctrl.nr_reconnects; nvme_rdma_reconnect_or_remove(ctrl); } static const struct nvme_ctrl_ops nvme_rdma_ctrl_ops = { .name = "rdma", .module = THIS_MODULE, .flags = NVME_F_FABRICS | NVME_F_METADATA_SUPPORTED, .reg_read32 = nvmf_reg_read32, .reg_read64 = nvmf_reg_read64, .reg_write32 = nvmf_reg_write32, .free_ctrl = nvme_rdma_free_ctrl, .submit_async_event = nvme_rdma_submit_async_event, .delete_ctrl = nvme_rdma_delete_ctrl, .get_address = nvmf_get_address, .stop_ctrl = nvme_rdma_stop_ctrl, }; /* * Fails a connection request if it matches an existing controller * (association) with the same tuple: * <Host NQN, Host ID, local address, remote address, remote port, SUBSYS NQN> * * if local address is not specified in the request, it will match an * existing controller with all the other parameters the same and no * local port address specified as well. * * The ports don't need to be compared as they are intrinsically * already matched by the port pointers supplied. 
*/ static bool nvme_rdma_existing_controller(struct nvmf_ctrl_options *opts) { struct nvme_rdma_ctrl *ctrl; bool found = false; mutex_lock(&nvme_rdma_ctrl_mutex); list_for_each_entry(ctrl, &nvme_rdma_ctrl_list, list) { found = nvmf_ip_options_match(&ctrl->ctrl, opts); if (found) break; } mutex_unlock(&nvme_rdma_ctrl_mutex); return found; } static struct nvme_ctrl *nvme_rdma_create_ctrl(struct device *dev, struct nvmf_ctrl_options *opts) { struct nvme_rdma_ctrl *ctrl; int ret; bool changed; ctrl = kzalloc(sizeof(*ctrl), GFP_KERNEL); if (!ctrl) return ERR_PTR(-ENOMEM); ctrl->ctrl.opts = opts; INIT_LIST_HEAD(&ctrl->list); if (!(opts->mask & NVMF_OPT_TRSVCID)) { opts->trsvcid = kstrdup(__stringify(NVME_RDMA_IP_PORT), GFP_KERNEL); if (!opts->trsvcid) { ret = -ENOMEM; goto out_free_ctrl; } opts->mask |= NVMF_OPT_TRSVCID; } ret = inet_pton_with_scope(&init_net, AF_UNSPEC, opts->traddr, opts->trsvcid, &ctrl->addr); if (ret) { pr_err("malformed address passed: %s:%s\n", opts->traddr, opts->trsvcid); goto out_free_ctrl; } if (opts->mask & NVMF_OPT_HOST_TRADDR) { ret = inet_pton_with_scope(&init_net, AF_UNSPEC, opts->host_traddr, NULL, &ctrl->src_addr); if (ret) { pr_err("malformed src address passed: %s\n", opts->host_traddr); goto out_free_ctrl; } } if (!opts->duplicate_connect && nvme_rdma_existing_controller(opts)) { ret = -EALREADY; goto out_free_ctrl; } INIT_DELAYED_WORK(&ctrl->reconnect_work, nvme_rdma_reconnect_ctrl_work); INIT_WORK(&ctrl->err_work, nvme_rdma_error_recovery_work); INIT_WORK(&ctrl->ctrl.reset_work, nvme_rdma_reset_ctrl_work); ctrl->ctrl.queue_count = opts->nr_io_queues + opts->nr_write_queues + opts->nr_poll_queues + 1; ctrl->ctrl.sqsize = opts->queue_size - 1; ctrl->ctrl.kato = opts->kato; ret = -ENOMEM; ctrl->queues = kcalloc(ctrl->ctrl.queue_count, sizeof(*ctrl->queues), GFP_KERNEL); if (!ctrl->queues) goto out_free_ctrl; ret = nvme_init_ctrl(&ctrl->ctrl, dev, &nvme_rdma_ctrl_ops, 0 /* no quirks, we're perfect! 
 */);
	if (ret)
		goto out_kfree_queues;

	changed = nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_CONNECTING);
	WARN_ON_ONCE(!changed);

	ret = nvme_rdma_setup_ctrl(ctrl, true);
	if (ret)
		goto out_uninit_ctrl;

	dev_info(ctrl->ctrl.device, "new ctrl: NQN \"%s\", addr %pISpcs\n",
		nvmf_ctrl_subsysnqn(&ctrl->ctrl), &ctrl->addr);

	mutex_lock(&nvme_rdma_ctrl_mutex);
	list_add_tail(&ctrl->list, &nvme_rdma_ctrl_list);
	mutex_unlock(&nvme_rdma_ctrl_mutex);

	return &ctrl->ctrl;

out_uninit_ctrl:
	nvme_uninit_ctrl(&ctrl->ctrl);
	nvme_put_ctrl(&ctrl->ctrl);
	if (ret > 0)
		ret = -EIO;
	return ERR_PTR(ret);
out_kfree_queues:
	kfree(ctrl->queues);
out_free_ctrl:
	kfree(ctrl);
	return ERR_PTR(ret);
}

static struct nvmf_transport_ops nvme_rdma_transport = {
	.name = "rdma",
	.module = THIS_MODULE,
	.required_opts = NVMF_OPT_TRADDR,
	.allowed_opts = NVMF_OPT_TRSVCID | NVMF_OPT_RECONNECT_DELAY |
			NVMF_OPT_HOST_TRADDR | NVMF_OPT_CTRL_LOSS_TMO |
			NVMF_OPT_NR_WRITE_QUEUES | NVMF_OPT_NR_POLL_QUEUES |
			NVMF_OPT_TOS,
	.create_ctrl = nvme_rdma_create_ctrl,
};

static void nvme_rdma_remove_one(struct ib_device *ib_device,
		void *client_data)
{
	struct nvme_rdma_ctrl *ctrl;
	struct nvme_rdma_device *ndev;
	bool found = false;

	mutex_lock(&device_list_mutex);
	list_for_each_entry(ndev, &device_list, entry) {
		if (ndev->dev == ib_device) {
			found = true;
			break;
		}
	}
	mutex_unlock(&device_list_mutex);

	if (!found)
		return;

	/* Delete all controllers using this device */
	mutex_lock(&nvme_rdma_ctrl_mutex);
	list_for_each_entry(ctrl, &nvme_rdma_ctrl_list, list) {
		if (ctrl->device->dev != ib_device)
			continue;
		nvme_delete_ctrl(&ctrl->ctrl);
	}
	mutex_unlock(&nvme_rdma_ctrl_mutex);
	flush_workqueue(nvme_delete_wq);
}

static struct ib_client nvme_rdma_ib_client = {
	.name = "nvme_rdma",
	.remove = nvme_rdma_remove_one
};

static int __init nvme_rdma_init_module(void)
{
	int ret;

	ret = ib_register_client(&nvme_rdma_ib_client);
	if (ret)
		return ret;

	ret = nvmf_register_transport(&nvme_rdma_transport);
	if (ret)
		goto err_unreg_client;

	return 0;

err_unreg_client:
	ib_unregister_client(&nvme_rdma_ib_client);
	return ret;
}

static void __exit nvme_rdma_cleanup_module(void)
{
	struct nvme_rdma_ctrl *ctrl;

	nvmf_unregister_transport(&nvme_rdma_transport);
	ib_unregister_client(&nvme_rdma_ib_client);

	mutex_lock(&nvme_rdma_ctrl_mutex);
	list_for_each_entry(ctrl, &nvme_rdma_ctrl_list, list)
		nvme_delete_ctrl(&ctrl->ctrl);
	mutex_unlock(&nvme_rdma_ctrl_mutex);
	flush_workqueue(nvme_delete_wq);
}

module_init(nvme_rdma_init_module);
module_exit(nvme_rdma_cleanup_module);

MODULE_LICENSE("GPL v2");
linux-master
drivers/nvme/host/rdma.c
// SPDX-License-Identifier: GPL-2.0 /* * NVM Express device driver * Copyright (c) 2011-2014, Intel Corporation. */ #include <linux/acpi.h> #include <linux/async.h> #include <linux/blkdev.h> #include <linux/blk-mq.h> #include <linux/blk-mq-pci.h> #include <linux/blk-integrity.h> #include <linux/dmi.h> #include <linux/init.h> #include <linux/interrupt.h> #include <linux/io.h> #include <linux/kstrtox.h> #include <linux/memremap.h> #include <linux/mm.h> #include <linux/module.h> #include <linux/mutex.h> #include <linux/once.h> #include <linux/pci.h> #include <linux/suspend.h> #include <linux/t10-pi.h> #include <linux/types.h> #include <linux/io-64-nonatomic-lo-hi.h> #include <linux/io-64-nonatomic-hi-lo.h> #include <linux/sed-opal.h> #include <linux/pci-p2pdma.h> #include "trace.h" #include "nvme.h" #define SQ_SIZE(q) ((q)->q_depth << (q)->sqes) #define CQ_SIZE(q) ((q)->q_depth * sizeof(struct nvme_completion)) #define SGES_PER_PAGE (NVME_CTRL_PAGE_SIZE / sizeof(struct nvme_sgl_desc)) /* * These can be higher, but we need to ensure that any command doesn't * require an sg allocation that needs more than a page of data. */ #define NVME_MAX_KB_SZ 8192 #define NVME_MAX_SEGS 128 #define NVME_MAX_NR_ALLOCATIONS 5 static int use_threaded_interrupts; module_param(use_threaded_interrupts, int, 0444); static bool use_cmb_sqes = true; module_param(use_cmb_sqes, bool, 0444); MODULE_PARM_DESC(use_cmb_sqes, "use controller's memory buffer for I/O SQes"); static unsigned int max_host_mem_size_mb = 128; module_param(max_host_mem_size_mb, uint, 0444); MODULE_PARM_DESC(max_host_mem_size_mb, "Maximum Host Memory Buffer (HMB) size per controller (in MiB)"); static unsigned int sgl_threshold = SZ_32K; module_param(sgl_threshold, uint, 0644); MODULE_PARM_DESC(sgl_threshold, "Use SGLs when average request segment size is larger or equal to " "this size. Use 0 to disable SGLs."); #define NVME_PCI_MIN_QUEUE_SIZE 2 #define NVME_PCI_MAX_QUEUE_SIZE 4095 static int io_queue_depth_set(const char *val, const struct kernel_param *kp); static const struct kernel_param_ops io_queue_depth_ops = { .set = io_queue_depth_set, .get = param_get_uint, }; static unsigned int io_queue_depth = 1024; module_param_cb(io_queue_depth, &io_queue_depth_ops, &io_queue_depth, 0644); MODULE_PARM_DESC(io_queue_depth, "set io queue depth, should >= 2 and < 4096"); static int io_queue_count_set(const char *val, const struct kernel_param *kp) { unsigned int n; int ret; ret = kstrtouint(val, 10, &n); if (ret != 0 || n > num_possible_cpus()) return -EINVAL; return param_set_uint(val, kp); } static const struct kernel_param_ops io_queue_count_ops = { .set = io_queue_count_set, .get = param_get_uint, }; static unsigned int write_queues; module_param_cb(write_queues, &io_queue_count_ops, &write_queues, 0644); MODULE_PARM_DESC(write_queues, "Number of queues to use for writes. If not set, reads and writes " "will share a queue set."); static unsigned int poll_queues; module_param_cb(poll_queues, &io_queue_count_ops, &poll_queues, 0644); MODULE_PARM_DESC(poll_queues, "Number of queues to use for polled IO."); static bool noacpi; module_param(noacpi, bool, 0444); MODULE_PARM_DESC(noacpi, "disable acpi bios quirks"); struct nvme_dev; struct nvme_queue; static void nvme_dev_disable(struct nvme_dev *dev, bool shutdown); static void nvme_delete_io_queues(struct nvme_dev *dev); static void nvme_update_attrs(struct nvme_dev *dev); /* * Represents an NVM Express device. Each nvme_dev is a PCI function. 
*/ struct nvme_dev { struct nvme_queue *queues; struct blk_mq_tag_set tagset; struct blk_mq_tag_set admin_tagset; u32 __iomem *dbs; struct device *dev; struct dma_pool *prp_page_pool; struct dma_pool *prp_small_pool; unsigned online_queues; unsigned max_qid; unsigned io_queues[HCTX_MAX_TYPES]; unsigned int num_vecs; u32 q_depth; int io_sqes; u32 db_stride; void __iomem *bar; unsigned long bar_mapped_size; struct mutex shutdown_lock; bool subsystem; u64 cmb_size; bool cmb_use_sqes; u32 cmbsz; u32 cmbloc; struct nvme_ctrl ctrl; u32 last_ps; bool hmb; mempool_t *iod_mempool; /* shadow doorbell buffer support: */ __le32 *dbbuf_dbs; dma_addr_t dbbuf_dbs_dma_addr; __le32 *dbbuf_eis; dma_addr_t dbbuf_eis_dma_addr; /* host memory buffer support: */ u64 host_mem_size; u32 nr_host_mem_descs; dma_addr_t host_mem_descs_dma; struct nvme_host_mem_buf_desc *host_mem_descs; void **host_mem_desc_bufs; unsigned int nr_allocated_queues; unsigned int nr_write_queues; unsigned int nr_poll_queues; }; static int io_queue_depth_set(const char *val, const struct kernel_param *kp) { return param_set_uint_minmax(val, kp, NVME_PCI_MIN_QUEUE_SIZE, NVME_PCI_MAX_QUEUE_SIZE); } static inline unsigned int sq_idx(unsigned int qid, u32 stride) { return qid * 2 * stride; } static inline unsigned int cq_idx(unsigned int qid, u32 stride) { return (qid * 2 + 1) * stride; } static inline struct nvme_dev *to_nvme_dev(struct nvme_ctrl *ctrl) { return container_of(ctrl, struct nvme_dev, ctrl); } /* * An NVM Express queue. Each device has at least two (one for admin * commands and one for I/O commands). */ struct nvme_queue { struct nvme_dev *dev; spinlock_t sq_lock; void *sq_cmds; /* only used for poll queues: */ spinlock_t cq_poll_lock ____cacheline_aligned_in_smp; struct nvme_completion *cqes; dma_addr_t sq_dma_addr; dma_addr_t cq_dma_addr; u32 __iomem *q_db; u32 q_depth; u16 cq_vector; u16 sq_tail; u16 last_sq_tail; u16 cq_head; u16 qid; u8 cq_phase; u8 sqes; unsigned long flags; #define NVMEQ_ENABLED 0 #define NVMEQ_SQ_CMB 1 #define NVMEQ_DELETE_ERROR 2 #define NVMEQ_POLLED 3 __le32 *dbbuf_sq_db; __le32 *dbbuf_cq_db; __le32 *dbbuf_sq_ei; __le32 *dbbuf_cq_ei; struct completion delete_done; }; union nvme_descriptor { struct nvme_sgl_desc *sg_list; __le64 *prp_list; }; /* * The nvme_iod describes the data in an I/O. * * The sg pointer contains the list of PRP/SGL chunk allocations in addition * to the actual struct scatterlist. */ struct nvme_iod { struct nvme_request req; struct nvme_command cmd; bool aborted; s8 nr_allocations; /* PRP list pool allocations. 0 means small pool in use */ unsigned int dma_len; /* length of single DMA segment mapping */ dma_addr_t first_dma; dma_addr_t meta_dma; struct sg_table sgt; union nvme_descriptor list[NVME_MAX_NR_ALLOCATIONS]; }; static inline unsigned int nvme_dbbuf_size(struct nvme_dev *dev) { return dev->nr_allocated_queues * 8 * dev->db_stride; } static void nvme_dbbuf_dma_alloc(struct nvme_dev *dev) { unsigned int mem_size = nvme_dbbuf_size(dev); if (!(dev->ctrl.oacs & NVME_CTRL_OACS_DBBUF_SUPP)) return; if (dev->dbbuf_dbs) { /* * Clear the dbbuf memory so the driver doesn't observe stale * values from the previous instantiation. 
		 */
		memset(dev->dbbuf_dbs, 0, mem_size);
		memset(dev->dbbuf_eis, 0, mem_size);
		return;
	}

	dev->dbbuf_dbs = dma_alloc_coherent(dev->dev, mem_size,
					    &dev->dbbuf_dbs_dma_addr,
					    GFP_KERNEL);
	if (!dev->dbbuf_dbs)
		goto fail;
	dev->dbbuf_eis = dma_alloc_coherent(dev->dev, mem_size,
					    &dev->dbbuf_eis_dma_addr,
					    GFP_KERNEL);
	if (!dev->dbbuf_eis)
		goto fail_free_dbbuf_dbs;
	return;

fail_free_dbbuf_dbs:
	dma_free_coherent(dev->dev, mem_size, dev->dbbuf_dbs,
			  dev->dbbuf_dbs_dma_addr);
	dev->dbbuf_dbs = NULL;
fail:
	dev_warn(dev->dev, "unable to allocate dma for dbbuf\n");
}

static void nvme_dbbuf_dma_free(struct nvme_dev *dev)
{
	unsigned int mem_size = nvme_dbbuf_size(dev);

	if (dev->dbbuf_dbs) {
		dma_free_coherent(dev->dev, mem_size, dev->dbbuf_dbs,
				  dev->dbbuf_dbs_dma_addr);
		dev->dbbuf_dbs = NULL;
	}
	if (dev->dbbuf_eis) {
		dma_free_coherent(dev->dev, mem_size, dev->dbbuf_eis,
				  dev->dbbuf_eis_dma_addr);
		dev->dbbuf_eis = NULL;
	}
}

static void nvme_dbbuf_init(struct nvme_dev *dev,
			    struct nvme_queue *nvmeq, int qid)
{
	if (!dev->dbbuf_dbs || !qid)
		return;

	nvmeq->dbbuf_sq_db = &dev->dbbuf_dbs[sq_idx(qid, dev->db_stride)];
	nvmeq->dbbuf_cq_db = &dev->dbbuf_dbs[cq_idx(qid, dev->db_stride)];
	nvmeq->dbbuf_sq_ei = &dev->dbbuf_eis[sq_idx(qid, dev->db_stride)];
	nvmeq->dbbuf_cq_ei = &dev->dbbuf_eis[cq_idx(qid, dev->db_stride)];
}

static void nvme_dbbuf_free(struct nvme_queue *nvmeq)
{
	if (!nvmeq->qid)
		return;

	nvmeq->dbbuf_sq_db = NULL;
	nvmeq->dbbuf_cq_db = NULL;
	nvmeq->dbbuf_sq_ei = NULL;
	nvmeq->dbbuf_cq_ei = NULL;
}

static void nvme_dbbuf_set(struct nvme_dev *dev)
{
	struct nvme_command c = { };
	unsigned int i;

	if (!dev->dbbuf_dbs)
		return;

	c.dbbuf.opcode = nvme_admin_dbbuf;
	c.dbbuf.prp1 = cpu_to_le64(dev->dbbuf_dbs_dma_addr);
	c.dbbuf.prp2 = cpu_to_le64(dev->dbbuf_eis_dma_addr);

	if (nvme_submit_sync_cmd(dev->ctrl.admin_q, &c, NULL, 0)) {
		dev_warn(dev->ctrl.device, "unable to set dbbuf\n");
		/* Free memory and continue on */
		nvme_dbbuf_dma_free(dev);

		for (i = 1; i <= dev->online_queues; i++)
			nvme_dbbuf_free(&dev->queues[i]);
	}
}

static inline int nvme_dbbuf_need_event(u16 event_idx, u16 new_idx, u16 old)
{
	return (u16)(new_idx - event_idx - 1) < (u16)(new_idx - old);
}

/* Update dbbuf and return true if an MMIO is required */
static bool nvme_dbbuf_update_and_check_event(u16 value, __le32 *dbbuf_db,
					      volatile __le32 *dbbuf_ei)
{
	if (dbbuf_db) {
		u16 old_value, event_idx;

		/*
		 * Ensure that the queue is written before updating
		 * the doorbell in memory
		 */
		wmb();

		old_value = le32_to_cpu(*dbbuf_db);
		*dbbuf_db = cpu_to_le32(value);

		/*
		 * Ensure that the doorbell is updated before reading the event
		 * index from memory. The controller needs to provide similar
		 * ordering to ensure the event index is updated before reading
		 * the doorbell.
		 */
		mb();

		event_idx = le32_to_cpu(*dbbuf_ei);
		if (!nvme_dbbuf_need_event(event_idx, value, old_value))
			return false;
	}

	return true;
}

/*
 * Will slightly overestimate the number of pages needed. This is OK
 * as it only leads to a small amount of wasted memory for the lifetime of
 * the I/O.
*/ static int nvme_pci_npages_prp(void) { unsigned max_bytes = (NVME_MAX_KB_SZ * 1024) + NVME_CTRL_PAGE_SIZE; unsigned nprps = DIV_ROUND_UP(max_bytes, NVME_CTRL_PAGE_SIZE); return DIV_ROUND_UP(8 * nprps, NVME_CTRL_PAGE_SIZE - 8); } static int nvme_admin_init_hctx(struct blk_mq_hw_ctx *hctx, void *data, unsigned int hctx_idx) { struct nvme_dev *dev = to_nvme_dev(data); struct nvme_queue *nvmeq = &dev->queues[0]; WARN_ON(hctx_idx != 0); WARN_ON(dev->admin_tagset.tags[0] != hctx->tags); hctx->driver_data = nvmeq; return 0; } static int nvme_init_hctx(struct blk_mq_hw_ctx *hctx, void *data, unsigned int hctx_idx) { struct nvme_dev *dev = to_nvme_dev(data); struct nvme_queue *nvmeq = &dev->queues[hctx_idx + 1]; WARN_ON(dev->tagset.tags[hctx_idx] != hctx->tags); hctx->driver_data = nvmeq; return 0; } static int nvme_pci_init_request(struct blk_mq_tag_set *set, struct request *req, unsigned int hctx_idx, unsigned int numa_node) { struct nvme_iod *iod = blk_mq_rq_to_pdu(req); nvme_req(req)->ctrl = set->driver_data; nvme_req(req)->cmd = &iod->cmd; return 0; } static int queue_irq_offset(struct nvme_dev *dev) { /* if we have more than 1 vec, admin queue offsets us by 1 */ if (dev->num_vecs > 1) return 1; return 0; } static void nvme_pci_map_queues(struct blk_mq_tag_set *set) { struct nvme_dev *dev = to_nvme_dev(set->driver_data); int i, qoff, offset; offset = queue_irq_offset(dev); for (i = 0, qoff = 0; i < set->nr_maps; i++) { struct blk_mq_queue_map *map = &set->map[i]; map->nr_queues = dev->io_queues[i]; if (!map->nr_queues) { BUG_ON(i == HCTX_TYPE_DEFAULT); continue; } /* * The poll queue(s) doesn't have an IRQ (and hence IRQ * affinity), so use the regular blk-mq cpu mapping */ map->queue_offset = qoff; if (i != HCTX_TYPE_POLL && offset) blk_mq_pci_map_queues(map, to_pci_dev(dev->dev), offset); else blk_mq_map_queues(map); qoff += map->nr_queues; offset += map->nr_queues; } } /* * Write sq tail if we are asked to, or if the next command would wrap. 
*/ static inline void nvme_write_sq_db(struct nvme_queue *nvmeq, bool write_sq) { if (!write_sq) { u16 next_tail = nvmeq->sq_tail + 1; if (next_tail == nvmeq->q_depth) next_tail = 0; if (next_tail != nvmeq->last_sq_tail) return; } if (nvme_dbbuf_update_and_check_event(nvmeq->sq_tail, nvmeq->dbbuf_sq_db, nvmeq->dbbuf_sq_ei)) writel(nvmeq->sq_tail, nvmeq->q_db); nvmeq->last_sq_tail = nvmeq->sq_tail; } static inline void nvme_sq_copy_cmd(struct nvme_queue *nvmeq, struct nvme_command *cmd) { memcpy(nvmeq->sq_cmds + (nvmeq->sq_tail << nvmeq->sqes), absolute_pointer(cmd), sizeof(*cmd)); if (++nvmeq->sq_tail == nvmeq->q_depth) nvmeq->sq_tail = 0; } static void nvme_commit_rqs(struct blk_mq_hw_ctx *hctx) { struct nvme_queue *nvmeq = hctx->driver_data; spin_lock(&nvmeq->sq_lock); if (nvmeq->sq_tail != nvmeq->last_sq_tail) nvme_write_sq_db(nvmeq, true); spin_unlock(&nvmeq->sq_lock); } static inline bool nvme_pci_use_sgls(struct nvme_dev *dev, struct request *req, int nseg) { struct nvme_queue *nvmeq = req->mq_hctx->driver_data; unsigned int avg_seg_size; avg_seg_size = DIV_ROUND_UP(blk_rq_payload_bytes(req), nseg); if (!nvme_ctrl_sgl_supported(&dev->ctrl)) return false; if (!nvmeq->qid) return false; if (!sgl_threshold || avg_seg_size < sgl_threshold) return false; return true; } static void nvme_free_prps(struct nvme_dev *dev, struct request *req) { const int last_prp = NVME_CTRL_PAGE_SIZE / sizeof(__le64) - 1; struct nvme_iod *iod = blk_mq_rq_to_pdu(req); dma_addr_t dma_addr = iod->first_dma; int i; for (i = 0; i < iod->nr_allocations; i++) { __le64 *prp_list = iod->list[i].prp_list; dma_addr_t next_dma_addr = le64_to_cpu(prp_list[last_prp]); dma_pool_free(dev->prp_page_pool, prp_list, dma_addr); dma_addr = next_dma_addr; } } static void nvme_unmap_data(struct nvme_dev *dev, struct request *req) { struct nvme_iod *iod = blk_mq_rq_to_pdu(req); if (iod->dma_len) { dma_unmap_page(dev->dev, iod->first_dma, iod->dma_len, rq_dma_dir(req)); return; } WARN_ON_ONCE(!iod->sgt.nents); dma_unmap_sgtable(dev->dev, &iod->sgt, rq_dma_dir(req), 0); if (iod->nr_allocations == 0) dma_pool_free(dev->prp_small_pool, iod->list[0].sg_list, iod->first_dma); else if (iod->nr_allocations == 1) dma_pool_free(dev->prp_page_pool, iod->list[0].sg_list, iod->first_dma); else nvme_free_prps(dev, req); mempool_free(iod->sgt.sgl, dev->iod_mempool); } static void nvme_print_sgl(struct scatterlist *sgl, int nents) { int i; struct scatterlist *sg; for_each_sg(sgl, sg, nents, i) { dma_addr_t phys = sg_phys(sg); pr_warn("sg[%d] phys_addr:%pad offset:%d length:%d " "dma_address:%pad dma_length:%d\n", i, &phys, sg->offset, sg->length, &sg_dma_address(sg), sg_dma_len(sg)); } } static blk_status_t nvme_pci_setup_prps(struct nvme_dev *dev, struct request *req, struct nvme_rw_command *cmnd) { struct nvme_iod *iod = blk_mq_rq_to_pdu(req); struct dma_pool *pool; int length = blk_rq_payload_bytes(req); struct scatterlist *sg = iod->sgt.sgl; int dma_len = sg_dma_len(sg); u64 dma_addr = sg_dma_address(sg); int offset = dma_addr & (NVME_CTRL_PAGE_SIZE - 1); __le64 *prp_list; dma_addr_t prp_dma; int nprps, i; length -= (NVME_CTRL_PAGE_SIZE - offset); if (length <= 0) { iod->first_dma = 0; goto done; } dma_len -= (NVME_CTRL_PAGE_SIZE - offset); if (dma_len) { dma_addr += (NVME_CTRL_PAGE_SIZE - offset); } else { sg = sg_next(sg); dma_addr = sg_dma_address(sg); dma_len = sg_dma_len(sg); } if (length <= NVME_CTRL_PAGE_SIZE) { iod->first_dma = dma_addr; goto done; } nprps = DIV_ROUND_UP(length, NVME_CTRL_PAGE_SIZE); if (nprps <= (256 / 8)) { pool = 
dev->prp_small_pool; iod->nr_allocations = 0; } else { pool = dev->prp_page_pool; iod->nr_allocations = 1; } prp_list = dma_pool_alloc(pool, GFP_ATOMIC, &prp_dma); if (!prp_list) { iod->nr_allocations = -1; return BLK_STS_RESOURCE; } iod->list[0].prp_list = prp_list; iod->first_dma = prp_dma; i = 0; for (;;) { if (i == NVME_CTRL_PAGE_SIZE >> 3) { __le64 *old_prp_list = prp_list; prp_list = dma_pool_alloc(pool, GFP_ATOMIC, &prp_dma); if (!prp_list) goto free_prps; iod->list[iod->nr_allocations++].prp_list = prp_list; prp_list[0] = old_prp_list[i - 1]; old_prp_list[i - 1] = cpu_to_le64(prp_dma); i = 1; } prp_list[i++] = cpu_to_le64(dma_addr); dma_len -= NVME_CTRL_PAGE_SIZE; dma_addr += NVME_CTRL_PAGE_SIZE; length -= NVME_CTRL_PAGE_SIZE; if (length <= 0) break; if (dma_len > 0) continue; if (unlikely(dma_len < 0)) goto bad_sgl; sg = sg_next(sg); dma_addr = sg_dma_address(sg); dma_len = sg_dma_len(sg); } done: cmnd->dptr.prp1 = cpu_to_le64(sg_dma_address(iod->sgt.sgl)); cmnd->dptr.prp2 = cpu_to_le64(iod->first_dma); return BLK_STS_OK; free_prps: nvme_free_prps(dev, req); return BLK_STS_RESOURCE; bad_sgl: WARN(DO_ONCE(nvme_print_sgl, iod->sgt.sgl, iod->sgt.nents), "Invalid SGL for payload:%d nents:%d\n", blk_rq_payload_bytes(req), iod->sgt.nents); return BLK_STS_IOERR; } static void nvme_pci_sgl_set_data(struct nvme_sgl_desc *sge, struct scatterlist *sg) { sge->addr = cpu_to_le64(sg_dma_address(sg)); sge->length = cpu_to_le32(sg_dma_len(sg)); sge->type = NVME_SGL_FMT_DATA_DESC << 4; } static void nvme_pci_sgl_set_seg(struct nvme_sgl_desc *sge, dma_addr_t dma_addr, int entries) { sge->addr = cpu_to_le64(dma_addr); sge->length = cpu_to_le32(entries * sizeof(*sge)); sge->type = NVME_SGL_FMT_LAST_SEG_DESC << 4; } static blk_status_t nvme_pci_setup_sgls(struct nvme_dev *dev, struct request *req, struct nvme_rw_command *cmd) { struct nvme_iod *iod = blk_mq_rq_to_pdu(req); struct dma_pool *pool; struct nvme_sgl_desc *sg_list; struct scatterlist *sg = iod->sgt.sgl; unsigned int entries = iod->sgt.nents; dma_addr_t sgl_dma; int i = 0; /* setting the transfer type as SGL */ cmd->flags = NVME_CMD_SGL_METABUF; if (entries == 1) { nvme_pci_sgl_set_data(&cmd->dptr.sgl, sg); return BLK_STS_OK; } if (entries <= (256 / sizeof(struct nvme_sgl_desc))) { pool = dev->prp_small_pool; iod->nr_allocations = 0; } else { pool = dev->prp_page_pool; iod->nr_allocations = 1; } sg_list = dma_pool_alloc(pool, GFP_ATOMIC, &sgl_dma); if (!sg_list) { iod->nr_allocations = -1; return BLK_STS_RESOURCE; } iod->list[0].sg_list = sg_list; iod->first_dma = sgl_dma; nvme_pci_sgl_set_seg(&cmd->dptr.sgl, sgl_dma, entries); do { nvme_pci_sgl_set_data(&sg_list[i++], sg); sg = sg_next(sg); } while (--entries > 0); return BLK_STS_OK; } static blk_status_t nvme_setup_prp_simple(struct nvme_dev *dev, struct request *req, struct nvme_rw_command *cmnd, struct bio_vec *bv) { struct nvme_iod *iod = blk_mq_rq_to_pdu(req); unsigned int offset = bv->bv_offset & (NVME_CTRL_PAGE_SIZE - 1); unsigned int first_prp_len = NVME_CTRL_PAGE_SIZE - offset; iod->first_dma = dma_map_bvec(dev->dev, bv, rq_dma_dir(req), 0); if (dma_mapping_error(dev->dev, iod->first_dma)) return BLK_STS_RESOURCE; iod->dma_len = bv->bv_len; cmnd->dptr.prp1 = cpu_to_le64(iod->first_dma); if (bv->bv_len > first_prp_len) cmnd->dptr.prp2 = cpu_to_le64(iod->first_dma + first_prp_len); else cmnd->dptr.prp2 = 0; return BLK_STS_OK; } static blk_status_t nvme_setup_sgl_simple(struct nvme_dev *dev, struct request *req, struct nvme_rw_command *cmnd, struct bio_vec *bv) { struct nvme_iod *iod 
= blk_mq_rq_to_pdu(req); iod->first_dma = dma_map_bvec(dev->dev, bv, rq_dma_dir(req), 0); if (dma_mapping_error(dev->dev, iod->first_dma)) return BLK_STS_RESOURCE; iod->dma_len = bv->bv_len; cmnd->flags = NVME_CMD_SGL_METABUF; cmnd->dptr.sgl.addr = cpu_to_le64(iod->first_dma); cmnd->dptr.sgl.length = cpu_to_le32(iod->dma_len); cmnd->dptr.sgl.type = NVME_SGL_FMT_DATA_DESC << 4; return BLK_STS_OK; } static blk_status_t nvme_map_data(struct nvme_dev *dev, struct request *req, struct nvme_command *cmnd) { struct nvme_iod *iod = blk_mq_rq_to_pdu(req); blk_status_t ret = BLK_STS_RESOURCE; int rc; if (blk_rq_nr_phys_segments(req) == 1) { struct nvme_queue *nvmeq = req->mq_hctx->driver_data; struct bio_vec bv = req_bvec(req); if (!is_pci_p2pdma_page(bv.bv_page)) { if (bv.bv_offset + bv.bv_len <= NVME_CTRL_PAGE_SIZE * 2) return nvme_setup_prp_simple(dev, req, &cmnd->rw, &bv); if (nvmeq->qid && sgl_threshold && nvme_ctrl_sgl_supported(&dev->ctrl)) return nvme_setup_sgl_simple(dev, req, &cmnd->rw, &bv); } } iod->dma_len = 0; iod->sgt.sgl = mempool_alloc(dev->iod_mempool, GFP_ATOMIC); if (!iod->sgt.sgl) return BLK_STS_RESOURCE; sg_init_table(iod->sgt.sgl, blk_rq_nr_phys_segments(req)); iod->sgt.orig_nents = blk_rq_map_sg(req->q, req, iod->sgt.sgl); if (!iod->sgt.orig_nents) goto out_free_sg; rc = dma_map_sgtable(dev->dev, &iod->sgt, rq_dma_dir(req), DMA_ATTR_NO_WARN); if (rc) { if (rc == -EREMOTEIO) ret = BLK_STS_TARGET; goto out_free_sg; } if (nvme_pci_use_sgls(dev, req, iod->sgt.nents)) ret = nvme_pci_setup_sgls(dev, req, &cmnd->rw); else ret = nvme_pci_setup_prps(dev, req, &cmnd->rw); if (ret != BLK_STS_OK) goto out_unmap_sg; return BLK_STS_OK; out_unmap_sg: dma_unmap_sgtable(dev->dev, &iod->sgt, rq_dma_dir(req), 0); out_free_sg: mempool_free(iod->sgt.sgl, dev->iod_mempool); return ret; } static blk_status_t nvme_map_metadata(struct nvme_dev *dev, struct request *req, struct nvme_command *cmnd) { struct nvme_iod *iod = blk_mq_rq_to_pdu(req); iod->meta_dma = dma_map_bvec(dev->dev, rq_integrity_vec(req), rq_dma_dir(req), 0); if (dma_mapping_error(dev->dev, iod->meta_dma)) return BLK_STS_IOERR; cmnd->rw.metadata = cpu_to_le64(iod->meta_dma); return BLK_STS_OK; } static blk_status_t nvme_prep_rq(struct nvme_dev *dev, struct request *req) { struct nvme_iod *iod = blk_mq_rq_to_pdu(req); blk_status_t ret; iod->aborted = false; iod->nr_allocations = -1; iod->sgt.nents = 0; ret = nvme_setup_cmd(req->q->queuedata, req); if (ret) return ret; if (blk_rq_nr_phys_segments(req)) { ret = nvme_map_data(dev, req, &iod->cmd); if (ret) goto out_free_cmd; } if (blk_integrity_rq(req)) { ret = nvme_map_metadata(dev, req, &iod->cmd); if (ret) goto out_unmap_data; } nvme_start_request(req); return BLK_STS_OK; out_unmap_data: nvme_unmap_data(dev, req); out_free_cmd: nvme_cleanup_cmd(req); return ret; } /* * NOTE: ns is NULL when called on the admin queue. */ static blk_status_t nvme_queue_rq(struct blk_mq_hw_ctx *hctx, const struct blk_mq_queue_data *bd) { struct nvme_queue *nvmeq = hctx->driver_data; struct nvme_dev *dev = nvmeq->dev; struct request *req = bd->rq; struct nvme_iod *iod = blk_mq_rq_to_pdu(req); blk_status_t ret; /* * We should not need to do this, but we're still using this to * ensure we can drain requests on a dying queue. 
*/ if (unlikely(!test_bit(NVMEQ_ENABLED, &nvmeq->flags))) return BLK_STS_IOERR; if (unlikely(!nvme_check_ready(&dev->ctrl, req, true))) return nvme_fail_nonready_command(&dev->ctrl, req); ret = nvme_prep_rq(dev, req); if (unlikely(ret)) return ret; spin_lock(&nvmeq->sq_lock); nvme_sq_copy_cmd(nvmeq, &iod->cmd); nvme_write_sq_db(nvmeq, bd->last); spin_unlock(&nvmeq->sq_lock); return BLK_STS_OK; } static void nvme_submit_cmds(struct nvme_queue *nvmeq, struct request **rqlist) { spin_lock(&nvmeq->sq_lock); while (!rq_list_empty(*rqlist)) { struct request *req = rq_list_pop(rqlist); struct nvme_iod *iod = blk_mq_rq_to_pdu(req); nvme_sq_copy_cmd(nvmeq, &iod->cmd); } nvme_write_sq_db(nvmeq, true); spin_unlock(&nvmeq->sq_lock); } static bool nvme_prep_rq_batch(struct nvme_queue *nvmeq, struct request *req) { /* * We should not need to do this, but we're still using this to * ensure we can drain requests on a dying queue. */ if (unlikely(!test_bit(NVMEQ_ENABLED, &nvmeq->flags))) return false; if (unlikely(!nvme_check_ready(&nvmeq->dev->ctrl, req, true))) return false; req->mq_hctx->tags->rqs[req->tag] = req; return nvme_prep_rq(nvmeq->dev, req) == BLK_STS_OK; } static void nvme_queue_rqs(struct request **rqlist) { struct request *req, *next, *prev = NULL; struct request *requeue_list = NULL; rq_list_for_each_safe(rqlist, req, next) { struct nvme_queue *nvmeq = req->mq_hctx->driver_data; if (!nvme_prep_rq_batch(nvmeq, req)) { /* detach 'req' and add to remainder list */ rq_list_move(rqlist, &requeue_list, req, prev); req = prev; if (!req) continue; } if (!next || req->mq_hctx != next->mq_hctx) { /* detach rest of list, and submit */ req->rq_next = NULL; nvme_submit_cmds(nvmeq, rqlist); *rqlist = next; prev = NULL; } else prev = req; } *rqlist = requeue_list; } static __always_inline void nvme_pci_unmap_rq(struct request *req) { struct nvme_queue *nvmeq = req->mq_hctx->driver_data; struct nvme_dev *dev = nvmeq->dev; if (blk_integrity_rq(req)) { struct nvme_iod *iod = blk_mq_rq_to_pdu(req); dma_unmap_page(dev->dev, iod->meta_dma, rq_integrity_vec(req)->bv_len, rq_dma_dir(req)); } if (blk_rq_nr_phys_segments(req)) nvme_unmap_data(dev, req); } static void nvme_pci_complete_rq(struct request *req) { nvme_pci_unmap_rq(req); nvme_complete_rq(req); } static void nvme_pci_complete_batch(struct io_comp_batch *iob) { nvme_complete_batch(iob, nvme_pci_unmap_rq); } /* We read the CQE phase first to check if the rest of the entry is valid */ static inline bool nvme_cqe_pending(struct nvme_queue *nvmeq) { struct nvme_completion *hcqe = &nvmeq->cqes[nvmeq->cq_head]; return (le16_to_cpu(READ_ONCE(hcqe->status)) & 1) == nvmeq->cq_phase; } static inline void nvme_ring_cq_doorbell(struct nvme_queue *nvmeq) { u16 head = nvmeq->cq_head; if (nvme_dbbuf_update_and_check_event(head, nvmeq->dbbuf_cq_db, nvmeq->dbbuf_cq_ei)) writel(head, nvmeq->q_db + nvmeq->dev->db_stride); } static inline struct blk_mq_tags *nvme_queue_tagset(struct nvme_queue *nvmeq) { if (!nvmeq->qid) return nvmeq->dev->admin_tagset.tags[0]; return nvmeq->dev->tagset.tags[nvmeq->qid - 1]; } static inline void nvme_handle_cqe(struct nvme_queue *nvmeq, struct io_comp_batch *iob, u16 idx) { struct nvme_completion *cqe = &nvmeq->cqes[idx]; __u16 command_id = READ_ONCE(cqe->command_id); struct request *req; /* * AEN requests are special as they don't time out and can * survive any kind of queue freeze and often don't respond to * aborts. We don't even bother to allocate a struct request * for them but rather special case them here. 
*/ if (unlikely(nvme_is_aen_req(nvmeq->qid, command_id))) { nvme_complete_async_event(&nvmeq->dev->ctrl, cqe->status, &cqe->result); return; } req = nvme_find_rq(nvme_queue_tagset(nvmeq), command_id); if (unlikely(!req)) { dev_warn(nvmeq->dev->ctrl.device, "invalid id %d completed on queue %d\n", command_id, le16_to_cpu(cqe->sq_id)); return; } trace_nvme_sq(req, cqe->sq_head, nvmeq->sq_tail); if (!nvme_try_complete_req(req, cqe->status, cqe->result) && !blk_mq_add_to_batch(req, iob, nvme_req(req)->status, nvme_pci_complete_batch)) nvme_pci_complete_rq(req); } static inline void nvme_update_cq_head(struct nvme_queue *nvmeq) { u32 tmp = nvmeq->cq_head + 1; if (tmp == nvmeq->q_depth) { nvmeq->cq_head = 0; nvmeq->cq_phase ^= 1; } else { nvmeq->cq_head = tmp; } } static inline int nvme_poll_cq(struct nvme_queue *nvmeq, struct io_comp_batch *iob) { int found = 0; while (nvme_cqe_pending(nvmeq)) { found++; /* * load-load control dependency between phase and the rest of * the cqe requires a full read memory barrier */ dma_rmb(); nvme_handle_cqe(nvmeq, iob, nvmeq->cq_head); nvme_update_cq_head(nvmeq); } if (found) nvme_ring_cq_doorbell(nvmeq); return found; } static irqreturn_t nvme_irq(int irq, void *data) { struct nvme_queue *nvmeq = data; DEFINE_IO_COMP_BATCH(iob); if (nvme_poll_cq(nvmeq, &iob)) { if (!rq_list_empty(iob.req_list)) nvme_pci_complete_batch(&iob); return IRQ_HANDLED; } return IRQ_NONE; } static irqreturn_t nvme_irq_check(int irq, void *data) { struct nvme_queue *nvmeq = data; if (nvme_cqe_pending(nvmeq)) return IRQ_WAKE_THREAD; return IRQ_NONE; } /* * Poll for completions for any interrupt driven queue * Can be called from any context. */ static void nvme_poll_irqdisable(struct nvme_queue *nvmeq) { struct pci_dev *pdev = to_pci_dev(nvmeq->dev->dev); WARN_ON_ONCE(test_bit(NVMEQ_POLLED, &nvmeq->flags)); disable_irq(pci_irq_vector(pdev, nvmeq->cq_vector)); nvme_poll_cq(nvmeq, NULL); enable_irq(pci_irq_vector(pdev, nvmeq->cq_vector)); } static int nvme_poll(struct blk_mq_hw_ctx *hctx, struct io_comp_batch *iob) { struct nvme_queue *nvmeq = hctx->driver_data; bool found; if (!nvme_cqe_pending(nvmeq)) return 0; spin_lock(&nvmeq->cq_poll_lock); found = nvme_poll_cq(nvmeq, iob); spin_unlock(&nvmeq->cq_poll_lock); return found; } static void nvme_pci_submit_async_event(struct nvme_ctrl *ctrl) { struct nvme_dev *dev = to_nvme_dev(ctrl); struct nvme_queue *nvmeq = &dev->queues[0]; struct nvme_command c = { }; c.common.opcode = nvme_admin_async_event; c.common.command_id = NVME_AQ_BLK_MQ_DEPTH; spin_lock(&nvmeq->sq_lock); nvme_sq_copy_cmd(nvmeq, &c); nvme_write_sq_db(nvmeq, true); spin_unlock(&nvmeq->sq_lock); } static int adapter_delete_queue(struct nvme_dev *dev, u8 opcode, u16 id) { struct nvme_command c = { }; c.delete_queue.opcode = opcode; c.delete_queue.qid = cpu_to_le16(id); return nvme_submit_sync_cmd(dev->ctrl.admin_q, &c, NULL, 0); } static int adapter_alloc_cq(struct nvme_dev *dev, u16 qid, struct nvme_queue *nvmeq, s16 vector) { struct nvme_command c = { }; int flags = NVME_QUEUE_PHYS_CONTIG; if (!test_bit(NVMEQ_POLLED, &nvmeq->flags)) flags |= NVME_CQ_IRQ_ENABLED; /* * Note: we (ab)use the fact that the prp fields survive if no data * is attached to the request. 
*/ c.create_cq.opcode = nvme_admin_create_cq; c.create_cq.prp1 = cpu_to_le64(nvmeq->cq_dma_addr); c.create_cq.cqid = cpu_to_le16(qid); c.create_cq.qsize = cpu_to_le16(nvmeq->q_depth - 1); c.create_cq.cq_flags = cpu_to_le16(flags); c.create_cq.irq_vector = cpu_to_le16(vector); return nvme_submit_sync_cmd(dev->ctrl.admin_q, &c, NULL, 0); } static int adapter_alloc_sq(struct nvme_dev *dev, u16 qid, struct nvme_queue *nvmeq) { struct nvme_ctrl *ctrl = &dev->ctrl; struct nvme_command c = { }; int flags = NVME_QUEUE_PHYS_CONTIG; /* * Some drives have a bug that auto-enables WRRU if MEDIUM isn't * set. Since URGENT priority is zeroes, it makes all queues * URGENT. */ if (ctrl->quirks & NVME_QUIRK_MEDIUM_PRIO_SQ) flags |= NVME_SQ_PRIO_MEDIUM; /* * Note: we (ab)use the fact that the prp fields survive if no data * is attached to the request. */ c.create_sq.opcode = nvme_admin_create_sq; c.create_sq.prp1 = cpu_to_le64(nvmeq->sq_dma_addr); c.create_sq.sqid = cpu_to_le16(qid); c.create_sq.qsize = cpu_to_le16(nvmeq->q_depth - 1); c.create_sq.sq_flags = cpu_to_le16(flags); c.create_sq.cqid = cpu_to_le16(qid); return nvme_submit_sync_cmd(dev->ctrl.admin_q, &c, NULL, 0); } static int adapter_delete_cq(struct nvme_dev *dev, u16 cqid) { return adapter_delete_queue(dev, nvme_admin_delete_cq, cqid); } static int adapter_delete_sq(struct nvme_dev *dev, u16 sqid) { return adapter_delete_queue(dev, nvme_admin_delete_sq, sqid); } static enum rq_end_io_ret abort_endio(struct request *req, blk_status_t error) { struct nvme_queue *nvmeq = req->mq_hctx->driver_data; dev_warn(nvmeq->dev->ctrl.device, "Abort status: 0x%x", nvme_req(req)->status); atomic_inc(&nvmeq->dev->ctrl.abort_limit); blk_mq_free_request(req); return RQ_END_IO_NONE; } static bool nvme_should_reset(struct nvme_dev *dev, u32 csts) { /* If true, indicates loss of adapter communication, possibly by a * NVMe Subsystem reset. */ bool nssro = dev->subsystem && (csts & NVME_CSTS_NSSRO); /* If there is a reset/reinit ongoing, we shouldn't reset again. */ switch (dev->ctrl.state) { case NVME_CTRL_RESETTING: case NVME_CTRL_CONNECTING: return false; default: break; } /* We shouldn't reset unless the controller is on fatal error state * _or_ if we lost the communication with it. */ if (!(csts & NVME_CSTS_CFS) && !nssro) return false; return true; } static void nvme_warn_reset(struct nvme_dev *dev, u32 csts) { /* Read a config register to help see what died. */ u16 pci_status; int result; result = pci_read_config_word(to_pci_dev(dev->dev), PCI_STATUS, &pci_status); if (result == PCIBIOS_SUCCESSFUL) dev_warn(dev->ctrl.device, "controller is down; will reset: CSTS=0x%x, PCI_STATUS=0x%hx\n", csts, pci_status); else dev_warn(dev->ctrl.device, "controller is down; will reset: CSTS=0x%x, PCI_STATUS read failed (%d)\n", csts, result); if (csts != ~0) return; dev_warn(dev->ctrl.device, "Does your device have a faulty power saving mode enabled?\n"); dev_warn(dev->ctrl.device, "Try \"nvme_core.default_ps_max_latency_us=0 pcie_aspm=off\" and report a bug\n"); } static enum blk_eh_timer_return nvme_timeout(struct request *req) { struct nvme_iod *iod = blk_mq_rq_to_pdu(req); struct nvme_queue *nvmeq = req->mq_hctx->driver_data; struct nvme_dev *dev = nvmeq->dev; struct request *abort_req; struct nvme_command cmd = { }; u32 csts = readl(dev->bar + NVME_REG_CSTS); /* If PCI error recovery process is happening, we cannot reset or * the recovery mechanism will surely fail. 
*/ mb(); if (pci_channel_offline(to_pci_dev(dev->dev))) return BLK_EH_RESET_TIMER; /* * Reset immediately if the controller is failed */ if (nvme_should_reset(dev, csts)) { nvme_warn_reset(dev, csts); goto disable; } /* * Did we miss an interrupt? */ if (test_bit(NVMEQ_POLLED, &nvmeq->flags)) nvme_poll(req->mq_hctx, NULL); else nvme_poll_irqdisable(nvmeq); if (blk_mq_rq_state(req) != MQ_RQ_IN_FLIGHT) { dev_warn(dev->ctrl.device, "I/O %d QID %d timeout, completion polled\n", req->tag, nvmeq->qid); return BLK_EH_DONE; } /* * Shutdown immediately if controller times out while starting. The * reset work will see the pci device disabled when it gets the forced * cancellation error. All outstanding requests are completed on * shutdown, so we return BLK_EH_DONE. */ switch (dev->ctrl.state) { case NVME_CTRL_CONNECTING: nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_DELETING); fallthrough; case NVME_CTRL_DELETING: dev_warn_ratelimited(dev->ctrl.device, "I/O %d QID %d timeout, disable controller\n", req->tag, nvmeq->qid); nvme_req(req)->flags |= NVME_REQ_CANCELLED; nvme_dev_disable(dev, true); return BLK_EH_DONE; case NVME_CTRL_RESETTING: return BLK_EH_RESET_TIMER; default: break; } /* * Shutdown the controller immediately and schedule a reset if the * command was already aborted once before and still hasn't been * returned to the driver, or if this is the admin queue. */ if (!nvmeq->qid || iod->aborted) { dev_warn(dev->ctrl.device, "I/O %d QID %d timeout, reset controller\n", req->tag, nvmeq->qid); nvme_req(req)->flags |= NVME_REQ_CANCELLED; goto disable; } if (atomic_dec_return(&dev->ctrl.abort_limit) < 0) { atomic_inc(&dev->ctrl.abort_limit); return BLK_EH_RESET_TIMER; } iod->aborted = true; cmd.abort.opcode = nvme_admin_abort_cmd; cmd.abort.cid = nvme_cid(req); cmd.abort.sqid = cpu_to_le16(nvmeq->qid); dev_warn(nvmeq->dev->ctrl.device, "I/O %d (%s) QID %d timeout, aborting\n", req->tag, nvme_get_opcode_str(nvme_req(req)->cmd->common.opcode), nvmeq->qid); abort_req = blk_mq_alloc_request(dev->ctrl.admin_q, nvme_req_op(&cmd), BLK_MQ_REQ_NOWAIT); if (IS_ERR(abort_req)) { atomic_inc(&dev->ctrl.abort_limit); return BLK_EH_RESET_TIMER; } nvme_init_request(abort_req, &cmd); abort_req->end_io = abort_endio; abort_req->end_io_data = NULL; blk_execute_rq_nowait(abort_req, false); /* * The aborted req will be completed on receiving the abort req. * We enable the timer again. If hit twice, it'll cause a device reset, * as the device then is in a faulty state. 
*/ return BLK_EH_RESET_TIMER; disable: if (!nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_RESETTING)) return BLK_EH_DONE; nvme_dev_disable(dev, false); if (nvme_try_sched_reset(&dev->ctrl)) nvme_unquiesce_io_queues(&dev->ctrl); return BLK_EH_DONE; } static void nvme_free_queue(struct nvme_queue *nvmeq) { dma_free_coherent(nvmeq->dev->dev, CQ_SIZE(nvmeq), (void *)nvmeq->cqes, nvmeq->cq_dma_addr); if (!nvmeq->sq_cmds) return; if (test_and_clear_bit(NVMEQ_SQ_CMB, &nvmeq->flags)) { pci_free_p2pmem(to_pci_dev(nvmeq->dev->dev), nvmeq->sq_cmds, SQ_SIZE(nvmeq)); } else { dma_free_coherent(nvmeq->dev->dev, SQ_SIZE(nvmeq), nvmeq->sq_cmds, nvmeq->sq_dma_addr); } } static void nvme_free_queues(struct nvme_dev *dev, int lowest) { int i; for (i = dev->ctrl.queue_count - 1; i >= lowest; i--) { dev->ctrl.queue_count--; nvme_free_queue(&dev->queues[i]); } } static void nvme_suspend_queue(struct nvme_dev *dev, unsigned int qid) { struct nvme_queue *nvmeq = &dev->queues[qid]; if (!test_and_clear_bit(NVMEQ_ENABLED, &nvmeq->flags)) return; /* ensure that nvme_queue_rq() sees NVMEQ_ENABLED cleared */ mb(); nvmeq->dev->online_queues--; if (!nvmeq->qid && nvmeq->dev->ctrl.admin_q) nvme_quiesce_admin_queue(&nvmeq->dev->ctrl); if (!test_and_clear_bit(NVMEQ_POLLED, &nvmeq->flags)) pci_free_irq(to_pci_dev(dev->dev), nvmeq->cq_vector, nvmeq); } static void nvme_suspend_io_queues(struct nvme_dev *dev) { int i; for (i = dev->ctrl.queue_count - 1; i > 0; i--) nvme_suspend_queue(dev, i); } /* * Called only on a device that has been disabled and after all other threads * that can check this device's completion queues have synced, except * nvme_poll(). This is the last chance for the driver to see a natural * completion before nvme_cancel_request() terminates all incomplete requests. */ static void nvme_reap_pending_cqes(struct nvme_dev *dev) { int i; for (i = dev->ctrl.queue_count - 1; i > 0; i--) { spin_lock(&dev->queues[i].cq_poll_lock); nvme_poll_cq(&dev->queues[i], NULL); spin_unlock(&dev->queues[i].cq_poll_lock); } } static int nvme_cmb_qdepth(struct nvme_dev *dev, int nr_io_queues, int entry_size) { int q_depth = dev->q_depth; unsigned q_size_aligned = roundup(q_depth * entry_size, NVME_CTRL_PAGE_SIZE); if (q_size_aligned * nr_io_queues > dev->cmb_size) { u64 mem_per_q = div_u64(dev->cmb_size, nr_io_queues); mem_per_q = round_down(mem_per_q, NVME_CTRL_PAGE_SIZE); q_depth = div_u64(mem_per_q, entry_size); /* * Ensure the reduced q_depth is above some threshold where it * would be better to map queues in system memory with the * original depth */ if (q_depth < 64) return -ENOMEM; } return q_depth; } static int nvme_alloc_sq_cmds(struct nvme_dev *dev, struct nvme_queue *nvmeq, int qid) { struct pci_dev *pdev = to_pci_dev(dev->dev); if (qid && dev->cmb_use_sqes && (dev->cmbsz & NVME_CMBSZ_SQS)) { nvmeq->sq_cmds = pci_alloc_p2pmem(pdev, SQ_SIZE(nvmeq)); if (nvmeq->sq_cmds) { nvmeq->sq_dma_addr = pci_p2pmem_virt_to_bus(pdev, nvmeq->sq_cmds); if (nvmeq->sq_dma_addr) { set_bit(NVMEQ_SQ_CMB, &nvmeq->flags); return 0; } pci_free_p2pmem(pdev, nvmeq->sq_cmds, SQ_SIZE(nvmeq)); } } nvmeq->sq_cmds = dma_alloc_coherent(dev->dev, SQ_SIZE(nvmeq), &nvmeq->sq_dma_addr, GFP_KERNEL); if (!nvmeq->sq_cmds) return -ENOMEM; return 0; } static int nvme_alloc_queue(struct nvme_dev *dev, int qid, int depth) { struct nvme_queue *nvmeq = &dev->queues[qid]; if (dev->ctrl.queue_count > qid) return 0; nvmeq->sqes = qid ? 
dev->io_sqes : NVME_ADM_SQES; nvmeq->q_depth = depth; nvmeq->cqes = dma_alloc_coherent(dev->dev, CQ_SIZE(nvmeq), &nvmeq->cq_dma_addr, GFP_KERNEL); if (!nvmeq->cqes) goto free_nvmeq; if (nvme_alloc_sq_cmds(dev, nvmeq, qid)) goto free_cqdma; nvmeq->dev = dev; spin_lock_init(&nvmeq->sq_lock); spin_lock_init(&nvmeq->cq_poll_lock); nvmeq->cq_head = 0; nvmeq->cq_phase = 1; nvmeq->q_db = &dev->dbs[qid * 2 * dev->db_stride]; nvmeq->qid = qid; dev->ctrl.queue_count++; return 0; free_cqdma: dma_free_coherent(dev->dev, CQ_SIZE(nvmeq), (void *)nvmeq->cqes, nvmeq->cq_dma_addr); free_nvmeq: return -ENOMEM; } static int queue_request_irq(struct nvme_queue *nvmeq) { struct pci_dev *pdev = to_pci_dev(nvmeq->dev->dev); int nr = nvmeq->dev->ctrl.instance; if (use_threaded_interrupts) { return pci_request_irq(pdev, nvmeq->cq_vector, nvme_irq_check, nvme_irq, nvmeq, "nvme%dq%d", nr, nvmeq->qid); } else { return pci_request_irq(pdev, nvmeq->cq_vector, nvme_irq, NULL, nvmeq, "nvme%dq%d", nr, nvmeq->qid); } } static void nvme_init_queue(struct nvme_queue *nvmeq, u16 qid) { struct nvme_dev *dev = nvmeq->dev; nvmeq->sq_tail = 0; nvmeq->last_sq_tail = 0; nvmeq->cq_head = 0; nvmeq->cq_phase = 1; nvmeq->q_db = &dev->dbs[qid * 2 * dev->db_stride]; memset((void *)nvmeq->cqes, 0, CQ_SIZE(nvmeq)); nvme_dbbuf_init(dev, nvmeq, qid); dev->online_queues++; wmb(); /* ensure the first interrupt sees the initialization */ } /* * Try getting shutdown_lock while setting up IO queues. */ static int nvme_setup_io_queues_trylock(struct nvme_dev *dev) { /* * Give up if the lock is being held by nvme_dev_disable. */ if (!mutex_trylock(&dev->shutdown_lock)) return -ENODEV; /* * Controller is in wrong state, fail early. */ if (dev->ctrl.state != NVME_CTRL_CONNECTING) { mutex_unlock(&dev->shutdown_lock); return -ENODEV; } return 0; } static int nvme_create_queue(struct nvme_queue *nvmeq, int qid, bool polled) { struct nvme_dev *dev = nvmeq->dev; int result; u16 vector = 0; clear_bit(NVMEQ_DELETE_ERROR, &nvmeq->flags); /* * A queue's vector matches the queue identifier unless the controller * has only one vector available. */ if (!polled) vector = dev->num_vecs == 1 ? 
0 : qid; else set_bit(NVMEQ_POLLED, &nvmeq->flags); result = adapter_alloc_cq(dev, qid, nvmeq, vector); if (result) return result; result = adapter_alloc_sq(dev, qid, nvmeq); if (result < 0) return result; if (result) goto release_cq; nvmeq->cq_vector = vector; result = nvme_setup_io_queues_trylock(dev); if (result) return result; nvme_init_queue(nvmeq, qid); if (!polled) { result = queue_request_irq(nvmeq); if (result < 0) goto release_sq; } set_bit(NVMEQ_ENABLED, &nvmeq->flags); mutex_unlock(&dev->shutdown_lock); return result; release_sq: dev->online_queues--; mutex_unlock(&dev->shutdown_lock); adapter_delete_sq(dev, qid); release_cq: adapter_delete_cq(dev, qid); return result; } static const struct blk_mq_ops nvme_mq_admin_ops = { .queue_rq = nvme_queue_rq, .complete = nvme_pci_complete_rq, .init_hctx = nvme_admin_init_hctx, .init_request = nvme_pci_init_request, .timeout = nvme_timeout, }; static const struct blk_mq_ops nvme_mq_ops = { .queue_rq = nvme_queue_rq, .queue_rqs = nvme_queue_rqs, .complete = nvme_pci_complete_rq, .commit_rqs = nvme_commit_rqs, .init_hctx = nvme_init_hctx, .init_request = nvme_pci_init_request, .map_queues = nvme_pci_map_queues, .timeout = nvme_timeout, .poll = nvme_poll, }; static void nvme_dev_remove_admin(struct nvme_dev *dev) { if (dev->ctrl.admin_q && !blk_queue_dying(dev->ctrl.admin_q)) { /* * If the controller was reset during removal, it's possible * user requests may be waiting on a stopped queue. Start the * queue to flush these to completion. */ nvme_unquiesce_admin_queue(&dev->ctrl); nvme_remove_admin_tag_set(&dev->ctrl); } } static unsigned long db_bar_size(struct nvme_dev *dev, unsigned nr_io_queues) { return NVME_REG_DBS + ((nr_io_queues + 1) * 8 * dev->db_stride); } static int nvme_remap_bar(struct nvme_dev *dev, unsigned long size) { struct pci_dev *pdev = to_pci_dev(dev->dev); if (size <= dev->bar_mapped_size) return 0; if (size > pci_resource_len(pdev, 0)) return -ENOMEM; if (dev->bar) iounmap(dev->bar); dev->bar = ioremap(pci_resource_start(pdev, 0), size); if (!dev->bar) { dev->bar_mapped_size = 0; return -ENOMEM; } dev->bar_mapped_size = size; dev->dbs = dev->bar + NVME_REG_DBS; return 0; } static int nvme_pci_configure_admin_queue(struct nvme_dev *dev) { int result; u32 aqa; struct nvme_queue *nvmeq; result = nvme_remap_bar(dev, db_bar_size(dev, 0)); if (result < 0) return result; dev->subsystem = readl(dev->bar + NVME_REG_VS) >= NVME_VS(1, 1, 0) ? NVME_CAP_NSSRC(dev->ctrl.cap) : 0; if (dev->subsystem && (readl(dev->bar + NVME_REG_CSTS) & NVME_CSTS_NSSRO)) writel(NVME_CSTS_NSSRO, dev->bar + NVME_REG_CSTS); /* * If the device has been passed off to us in an enabled state, just * clear the enabled bit. The spec says we should set the 'shutdown * notification bits', but doing so may cause the device to complete * commands to the admin queue ... and we don't know what memory that * might be pointing at! 
*/ result = nvme_disable_ctrl(&dev->ctrl, false); if (result < 0) return result; result = nvme_alloc_queue(dev, 0, NVME_AQ_DEPTH); if (result) return result; dev->ctrl.numa_node = dev_to_node(dev->dev); nvmeq = &dev->queues[0]; aqa = nvmeq->q_depth - 1; aqa |= aqa << 16; writel(aqa, dev->bar + NVME_REG_AQA); lo_hi_writeq(nvmeq->sq_dma_addr, dev->bar + NVME_REG_ASQ); lo_hi_writeq(nvmeq->cq_dma_addr, dev->bar + NVME_REG_ACQ); result = nvme_enable_ctrl(&dev->ctrl); if (result) return result; nvmeq->cq_vector = 0; nvme_init_queue(nvmeq, 0); result = queue_request_irq(nvmeq); if (result) { dev->online_queues--; return result; } set_bit(NVMEQ_ENABLED, &nvmeq->flags); return result; } static int nvme_create_io_queues(struct nvme_dev *dev) { unsigned i, max, rw_queues; int ret = 0; for (i = dev->ctrl.queue_count; i <= dev->max_qid; i++) { if (nvme_alloc_queue(dev, i, dev->q_depth)) { ret = -ENOMEM; break; } } max = min(dev->max_qid, dev->ctrl.queue_count - 1); if (max != 1 && dev->io_queues[HCTX_TYPE_POLL]) { rw_queues = dev->io_queues[HCTX_TYPE_DEFAULT] + dev->io_queues[HCTX_TYPE_READ]; } else { rw_queues = max; } for (i = dev->online_queues; i <= max; i++) { bool polled = i > rw_queues; ret = nvme_create_queue(&dev->queues[i], i, polled); if (ret) break; } /* * Ignore failing Create SQ/CQ commands, we can continue with less * than the desired amount of queues, and even a controller without * I/O queues can still be used to issue admin commands. This might * be useful to upgrade a buggy firmware for example. */ return ret >= 0 ? 0 : ret; } static u64 nvme_cmb_size_unit(struct nvme_dev *dev) { u8 szu = (dev->cmbsz >> NVME_CMBSZ_SZU_SHIFT) & NVME_CMBSZ_SZU_MASK; return 1ULL << (12 + 4 * szu); } static u32 nvme_cmb_size(struct nvme_dev *dev) { return (dev->cmbsz >> NVME_CMBSZ_SZ_SHIFT) & NVME_CMBSZ_SZ_MASK; } static void nvme_map_cmb(struct nvme_dev *dev) { u64 size, offset; resource_size_t bar_size; struct pci_dev *pdev = to_pci_dev(dev->dev); int bar; if (dev->cmb_size) return; if (NVME_CAP_CMBS(dev->ctrl.cap)) writel(NVME_CMBMSC_CRE, dev->bar + NVME_REG_CMBMSC); dev->cmbsz = readl(dev->bar + NVME_REG_CMBSZ); if (!dev->cmbsz) return; dev->cmbloc = readl(dev->bar + NVME_REG_CMBLOC); size = nvme_cmb_size_unit(dev) * nvme_cmb_size(dev); offset = nvme_cmb_size_unit(dev) * NVME_CMB_OFST(dev->cmbloc); bar = NVME_CMB_BIR(dev->cmbloc); bar_size = pci_resource_len(pdev, bar); if (offset > bar_size) return; /* * Tell the controller about the host side address mapping the CMB, * and enable CMB decoding for the NVMe 1.4+ scheme: */ if (NVME_CAP_CMBS(dev->ctrl.cap)) { hi_lo_writeq(NVME_CMBMSC_CRE | NVME_CMBMSC_CMSE | (pci_bus_address(pdev, bar) + offset), dev->bar + NVME_REG_CMBMSC); } /* * Controllers may support a CMB size larger than their BAR, * for example, due to being behind a bridge. 
Reduce the CMB to * the reported size of the BAR */ if (size > bar_size - offset) size = bar_size - offset; if (pci_p2pdma_add_resource(pdev, bar, size, offset)) { dev_warn(dev->ctrl.device, "failed to register the CMB\n"); return; } dev->cmb_size = size; dev->cmb_use_sqes = use_cmb_sqes && (dev->cmbsz & NVME_CMBSZ_SQS); if ((dev->cmbsz & (NVME_CMBSZ_WDS | NVME_CMBSZ_RDS)) == (NVME_CMBSZ_WDS | NVME_CMBSZ_RDS)) pci_p2pmem_publish(pdev, true); nvme_update_attrs(dev); } static int nvme_set_host_mem(struct nvme_dev *dev, u32 bits) { u32 host_mem_size = dev->host_mem_size >> NVME_CTRL_PAGE_SHIFT; u64 dma_addr = dev->host_mem_descs_dma; struct nvme_command c = { }; int ret; c.features.opcode = nvme_admin_set_features; c.features.fid = cpu_to_le32(NVME_FEAT_HOST_MEM_BUF); c.features.dword11 = cpu_to_le32(bits); c.features.dword12 = cpu_to_le32(host_mem_size); c.features.dword13 = cpu_to_le32(lower_32_bits(dma_addr)); c.features.dword14 = cpu_to_le32(upper_32_bits(dma_addr)); c.features.dword15 = cpu_to_le32(dev->nr_host_mem_descs); ret = nvme_submit_sync_cmd(dev->ctrl.admin_q, &c, NULL, 0); if (ret) { dev_warn(dev->ctrl.device, "failed to set host mem (err %d, flags %#x).\n", ret, bits); } else dev->hmb = bits & NVME_HOST_MEM_ENABLE; return ret; } static void nvme_free_host_mem(struct nvme_dev *dev) { int i; for (i = 0; i < dev->nr_host_mem_descs; i++) { struct nvme_host_mem_buf_desc *desc = &dev->host_mem_descs[i]; size_t size = le32_to_cpu(desc->size) * NVME_CTRL_PAGE_SIZE; dma_free_attrs(dev->dev, size, dev->host_mem_desc_bufs[i], le64_to_cpu(desc->addr), DMA_ATTR_NO_KERNEL_MAPPING | DMA_ATTR_NO_WARN); } kfree(dev->host_mem_desc_bufs); dev->host_mem_desc_bufs = NULL; dma_free_coherent(dev->dev, dev->nr_host_mem_descs * sizeof(*dev->host_mem_descs), dev->host_mem_descs, dev->host_mem_descs_dma); dev->host_mem_descs = NULL; dev->nr_host_mem_descs = 0; } static int __nvme_alloc_host_mem(struct nvme_dev *dev, u64 preferred, u32 chunk_size) { struct nvme_host_mem_buf_desc *descs; u32 max_entries, len; dma_addr_t descs_dma; int i = 0; void **bufs; u64 size, tmp; tmp = (preferred + chunk_size - 1); do_div(tmp, chunk_size); max_entries = tmp; if (dev->ctrl.hmmaxd && dev->ctrl.hmmaxd < max_entries) max_entries = dev->ctrl.hmmaxd; descs = dma_alloc_coherent(dev->dev, max_entries * sizeof(*descs), &descs_dma, GFP_KERNEL); if (!descs) goto out; bufs = kcalloc(max_entries, sizeof(*bufs), GFP_KERNEL); if (!bufs) goto out_free_descs; for (size = 0; size < preferred && i < max_entries; size += len) { dma_addr_t dma_addr; len = min_t(u64, chunk_size, preferred - size); bufs[i] = dma_alloc_attrs(dev->dev, len, &dma_addr, GFP_KERNEL, DMA_ATTR_NO_KERNEL_MAPPING | DMA_ATTR_NO_WARN); if (!bufs[i]) break; descs[i].addr = cpu_to_le64(dma_addr); descs[i].size = cpu_to_le32(len / NVME_CTRL_PAGE_SIZE); i++; } if (!size) goto out_free_bufs; dev->nr_host_mem_descs = i; dev->host_mem_size = size; dev->host_mem_descs = descs; dev->host_mem_descs_dma = descs_dma; dev->host_mem_desc_bufs = bufs; return 0; out_free_bufs: while (--i >= 0) { size_t size = le32_to_cpu(descs[i].size) * NVME_CTRL_PAGE_SIZE; dma_free_attrs(dev->dev, size, bufs[i], le64_to_cpu(descs[i].addr), DMA_ATTR_NO_KERNEL_MAPPING | DMA_ATTR_NO_WARN); } kfree(bufs); out_free_descs: dma_free_coherent(dev->dev, max_entries * sizeof(*descs), descs, descs_dma); out: dev->host_mem_descs = NULL; return -ENOMEM; } static int nvme_alloc_host_mem(struct nvme_dev *dev, u64 min, u64 preferred) { u64 min_chunk = min_t(u64, preferred, PAGE_SIZE * MAX_ORDER_NR_PAGES); u64 
hmminds = max_t(u32, dev->ctrl.hmminds * 4096, PAGE_SIZE * 2); u64 chunk_size; /* start big and work our way down */ for (chunk_size = min_chunk; chunk_size >= hmminds; chunk_size /= 2) { if (!__nvme_alloc_host_mem(dev, preferred, chunk_size)) { if (!min || dev->host_mem_size >= min) return 0; nvme_free_host_mem(dev); } } return -ENOMEM; } static int nvme_setup_host_mem(struct nvme_dev *dev) { u64 max = (u64)max_host_mem_size_mb * SZ_1M; u64 preferred = (u64)dev->ctrl.hmpre * 4096; u64 min = (u64)dev->ctrl.hmmin * 4096; u32 enable_bits = NVME_HOST_MEM_ENABLE; int ret; if (!dev->ctrl.hmpre) return 0; preferred = min(preferred, max); if (min > max) { dev_warn(dev->ctrl.device, "min host memory (%lld MiB) above limit (%d MiB).\n", min >> ilog2(SZ_1M), max_host_mem_size_mb); nvme_free_host_mem(dev); return 0; } /* * If we already have a buffer allocated check if we can reuse it. */ if (dev->host_mem_descs) { if (dev->host_mem_size >= min) enable_bits |= NVME_HOST_MEM_RETURN; else nvme_free_host_mem(dev); } if (!dev->host_mem_descs) { if (nvme_alloc_host_mem(dev, min, preferred)) { dev_warn(dev->ctrl.device, "failed to allocate host memory buffer.\n"); return 0; /* controller must work without HMB */ } dev_info(dev->ctrl.device, "allocated %lld MiB host memory buffer.\n", dev->host_mem_size >> ilog2(SZ_1M)); } ret = nvme_set_host_mem(dev, enable_bits); if (ret) nvme_free_host_mem(dev); return ret; } static ssize_t cmb_show(struct device *dev, struct device_attribute *attr, char *buf) { struct nvme_dev *ndev = to_nvme_dev(dev_get_drvdata(dev)); return sysfs_emit(buf, "cmbloc : x%08x\ncmbsz : x%08x\n", ndev->cmbloc, ndev->cmbsz); } static DEVICE_ATTR_RO(cmb); static ssize_t cmbloc_show(struct device *dev, struct device_attribute *attr, char *buf) { struct nvme_dev *ndev = to_nvme_dev(dev_get_drvdata(dev)); return sysfs_emit(buf, "%u\n", ndev->cmbloc); } static DEVICE_ATTR_RO(cmbloc); static ssize_t cmbsz_show(struct device *dev, struct device_attribute *attr, char *buf) { struct nvme_dev *ndev = to_nvme_dev(dev_get_drvdata(dev)); return sysfs_emit(buf, "%u\n", ndev->cmbsz); } static DEVICE_ATTR_RO(cmbsz); static ssize_t hmb_show(struct device *dev, struct device_attribute *attr, char *buf) { struct nvme_dev *ndev = to_nvme_dev(dev_get_drvdata(dev)); return sysfs_emit(buf, "%d\n", ndev->hmb); } static ssize_t hmb_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct nvme_dev *ndev = to_nvme_dev(dev_get_drvdata(dev)); bool new; int ret; if (kstrtobool(buf, &new) < 0) return -EINVAL; if (new == ndev->hmb) return count; if (new) { ret = nvme_setup_host_mem(ndev); } else { ret = nvme_set_host_mem(ndev, 0); if (!ret) nvme_free_host_mem(ndev); } if (ret < 0) return ret; return count; } static DEVICE_ATTR_RW(hmb); static umode_t nvme_pci_attrs_are_visible(struct kobject *kobj, struct attribute *a, int n) { struct nvme_ctrl *ctrl = dev_get_drvdata(container_of(kobj, struct device, kobj)); struct nvme_dev *dev = to_nvme_dev(ctrl); if (a == &dev_attr_cmb.attr || a == &dev_attr_cmbloc.attr || a == &dev_attr_cmbsz.attr) { if (!dev->cmbsz) return 0; } if (a == &dev_attr_hmb.attr && !ctrl->hmpre) return 0; return a->mode; } static struct attribute *nvme_pci_attrs[] = { &dev_attr_cmb.attr, &dev_attr_cmbloc.attr, &dev_attr_cmbsz.attr, &dev_attr_hmb.attr, NULL, }; static const struct attribute_group nvme_pci_dev_attrs_group = { .attrs = nvme_pci_attrs, .is_visible = nvme_pci_attrs_are_visible, }; static const struct attribute_group *nvme_pci_dev_attr_groups[] = { 
&nvme_dev_attrs_group, &nvme_pci_dev_attrs_group, NULL, }; static void nvme_update_attrs(struct nvme_dev *dev) { sysfs_update_group(&dev->ctrl.device->kobj, &nvme_pci_dev_attrs_group); } /* * nirqs is the number of interrupts available for write and read * queues. The core already reserved an interrupt for the admin queue. */ static void nvme_calc_irq_sets(struct irq_affinity *affd, unsigned int nrirqs) { struct nvme_dev *dev = affd->priv; unsigned int nr_read_queues, nr_write_queues = dev->nr_write_queues; /* * If there is no interrupt available for queues, ensure that * the default queue is set to 1. The affinity set size is * also set to one, but the irq core ignores it for this case. * * If only one interrupt is available or 'write_queue' == 0, combine * write and read queues. * * If 'write_queues' > 0, ensure it leaves room for at least one read * queue. */ if (!nrirqs) { nrirqs = 1; nr_read_queues = 0; } else if (nrirqs == 1 || !nr_write_queues) { nr_read_queues = 0; } else if (nr_write_queues >= nrirqs) { nr_read_queues = 1; } else { nr_read_queues = nrirqs - nr_write_queues; } dev->io_queues[HCTX_TYPE_DEFAULT] = nrirqs - nr_read_queues; affd->set_size[HCTX_TYPE_DEFAULT] = nrirqs - nr_read_queues; dev->io_queues[HCTX_TYPE_READ] = nr_read_queues; affd->set_size[HCTX_TYPE_READ] = nr_read_queues; affd->nr_sets = nr_read_queues ? 2 : 1; } static int nvme_setup_irqs(struct nvme_dev *dev, unsigned int nr_io_queues) { struct pci_dev *pdev = to_pci_dev(dev->dev); struct irq_affinity affd = { .pre_vectors = 1, .calc_sets = nvme_calc_irq_sets, .priv = dev, }; unsigned int irq_queues, poll_queues; /* * Poll queues don't need interrupts, but we need at least one I/O queue * left over for non-polled I/O. */ poll_queues = min(dev->nr_poll_queues, nr_io_queues - 1); dev->io_queues[HCTX_TYPE_POLL] = poll_queues; /* * Initialize for the single interrupt case, will be updated in * nvme_calc_irq_sets(). */ dev->io_queues[HCTX_TYPE_DEFAULT] = 1; dev->io_queues[HCTX_TYPE_READ] = 0; /* * We need interrupts for the admin queue and each non-polled I/O queue, * but some Apple controllers require all queues to use the first * vector. */ irq_queues = 1; if (!(dev->ctrl.quirks & NVME_QUIRK_SINGLE_VECTOR)) irq_queues += (nr_io_queues - poll_queues); return pci_alloc_irq_vectors_affinity(pdev, 1, irq_queues, PCI_IRQ_ALL_TYPES | PCI_IRQ_AFFINITY, &affd); } static unsigned int nvme_max_io_queues(struct nvme_dev *dev) { /* * If tags are shared with admin queue (Apple bug), then * make sure we only use one IO queue. */ if (dev->ctrl.quirks & NVME_QUIRK_SHARED_TAGS) return 1; return num_possible_cpus() + dev->nr_write_queues + dev->nr_poll_queues; } static int nvme_setup_io_queues(struct nvme_dev *dev) { struct nvme_queue *adminq = &dev->queues[0]; struct pci_dev *pdev = to_pci_dev(dev->dev); unsigned int nr_io_queues; unsigned long size; int result; /* * Sample the module parameters once at reset time so that we have * stable values to work with. */ dev->nr_write_queues = write_queues; dev->nr_poll_queues = poll_queues; nr_io_queues = dev->nr_allocated_queues - 1; result = nvme_set_queue_count(&dev->ctrl, &nr_io_queues); if (result < 0) return result; if (nr_io_queues == 0) return 0; /* * Free IRQ resources as soon as NVMEQ_ENABLED bit transitions * from set to unset. If there is a window to it is truely freed, * pci_free_irq_vectors() jumping into this window will crash. * And take lock to avoid racing with pci_free_irq_vectors() in * nvme_dev_disable() path. 
*/ result = nvme_setup_io_queues_trylock(dev); if (result) return result; if (test_and_clear_bit(NVMEQ_ENABLED, &adminq->flags)) pci_free_irq(pdev, 0, adminq); if (dev->cmb_use_sqes) { result = nvme_cmb_qdepth(dev, nr_io_queues, sizeof(struct nvme_command)); if (result > 0) { dev->q_depth = result; dev->ctrl.sqsize = result - 1; } else { dev->cmb_use_sqes = false; } } do { size = db_bar_size(dev, nr_io_queues); result = nvme_remap_bar(dev, size); if (!result) break; if (!--nr_io_queues) { result = -ENOMEM; goto out_unlock; } } while (1); adminq->q_db = dev->dbs; retry: /* Deregister the admin queue's interrupt */ if (test_and_clear_bit(NVMEQ_ENABLED, &adminq->flags)) pci_free_irq(pdev, 0, adminq); /* * If we enable msix early due to not intx, disable it again before * setting up the full range we need. */ pci_free_irq_vectors(pdev); result = nvme_setup_irqs(dev, nr_io_queues); if (result <= 0) { result = -EIO; goto out_unlock; } dev->num_vecs = result; result = max(result - 1, 1); dev->max_qid = result + dev->io_queues[HCTX_TYPE_POLL]; /* * Should investigate if there's a performance win from allocating * more queues than interrupt vectors; it might allow the submission * path to scale better, even if the receive path is limited by the * number of interrupts. */ result = queue_request_irq(adminq); if (result) goto out_unlock; set_bit(NVMEQ_ENABLED, &adminq->flags); mutex_unlock(&dev->shutdown_lock); result = nvme_create_io_queues(dev); if (result || dev->online_queues < 2) return result; if (dev->online_queues - 1 < dev->max_qid) { nr_io_queues = dev->online_queues - 1; nvme_delete_io_queues(dev); result = nvme_setup_io_queues_trylock(dev); if (result) return result; nvme_suspend_io_queues(dev); goto retry; } dev_info(dev->ctrl.device, "%d/%d/%d default/read/poll queues\n", dev->io_queues[HCTX_TYPE_DEFAULT], dev->io_queues[HCTX_TYPE_READ], dev->io_queues[HCTX_TYPE_POLL]); return 0; out_unlock: mutex_unlock(&dev->shutdown_lock); return result; } static enum rq_end_io_ret nvme_del_queue_end(struct request *req, blk_status_t error) { struct nvme_queue *nvmeq = req->end_io_data; blk_mq_free_request(req); complete(&nvmeq->delete_done); return RQ_END_IO_NONE; } static enum rq_end_io_ret nvme_del_cq_end(struct request *req, blk_status_t error) { struct nvme_queue *nvmeq = req->end_io_data; if (error) set_bit(NVMEQ_DELETE_ERROR, &nvmeq->flags); return nvme_del_queue_end(req, error); } static int nvme_delete_queue(struct nvme_queue *nvmeq, u8 opcode) { struct request_queue *q = nvmeq->dev->ctrl.admin_q; struct request *req; struct nvme_command cmd = { }; cmd.delete_queue.opcode = opcode; cmd.delete_queue.qid = cpu_to_le16(nvmeq->qid); req = blk_mq_alloc_request(q, nvme_req_op(&cmd), BLK_MQ_REQ_NOWAIT); if (IS_ERR(req)) return PTR_ERR(req); nvme_init_request(req, &cmd); if (opcode == nvme_admin_delete_cq) req->end_io = nvme_del_cq_end; else req->end_io = nvme_del_queue_end; req->end_io_data = nvmeq; init_completion(&nvmeq->delete_done); blk_execute_rq_nowait(req, false); return 0; } static bool __nvme_delete_io_queues(struct nvme_dev *dev, u8 opcode) { int nr_queues = dev->online_queues - 1, sent = 0; unsigned long timeout; retry: timeout = NVME_ADMIN_TIMEOUT; while (nr_queues > 0) { if (nvme_delete_queue(&dev->queues[nr_queues], opcode)) break; nr_queues--; sent++; } while (sent) { struct nvme_queue *nvmeq = &dev->queues[nr_queues + sent]; timeout = wait_for_completion_io_timeout(&nvmeq->delete_done, timeout); if (timeout == 0) return false; sent--; if (nr_queues) goto retry; } return true; } 
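/*
 * Editor's illustrative sketch (added, not part of the original driver):
 * a pure function that mirrors the default/read interrupt-set split
 * performed by nvme_calc_irq_sets() above, so the partitioning rules can
 * be followed in isolation.  The function name and signature are invented
 * for this example.
 */
static __maybe_unused void example_split_irq_sets(unsigned int nrirqs,
						  unsigned int nr_write_queues,
						  unsigned int *nr_default,
						  unsigned int *nr_read)
{
	unsigned int nr_read_queues;

	if (!nrirqs) {
		/* No vector left for I/O: fall back to one default queue. */
		nrirqs = 1;
		nr_read_queues = 0;
	} else if (nrirqs == 1 || !nr_write_queues) {
		/* One vector or no dedicated write queues: all default. */
		nr_read_queues = 0;
	} else if (nr_write_queues >= nrirqs) {
		/* Write queues want everything: still keep one read queue. */
		nr_read_queues = 1;
	} else {
		/* Whatever the write queues leave over becomes read queues. */
		nr_read_queues = nrirqs - nr_write_queues;
	}

	*nr_default = nrirqs - nr_read_queues;
	*nr_read = nr_read_queues;
}
/*
 * For example, 8 I/O vectors with write_queues=2 split into 2 default and
 * 6 read queues; with write_queues=0 all 8 vectors stay default.
 */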
static void nvme_delete_io_queues(struct nvme_dev *dev) { if (__nvme_delete_io_queues(dev, nvme_admin_delete_sq)) __nvme_delete_io_queues(dev, nvme_admin_delete_cq); } static unsigned int nvme_pci_nr_maps(struct nvme_dev *dev) { if (dev->io_queues[HCTX_TYPE_POLL]) return 3; if (dev->io_queues[HCTX_TYPE_READ]) return 2; return 1; } static void nvme_pci_update_nr_queues(struct nvme_dev *dev) { blk_mq_update_nr_hw_queues(&dev->tagset, dev->online_queues - 1); /* free previously allocated queues that are no longer usable */ nvme_free_queues(dev, dev->online_queues); } static int nvme_pci_enable(struct nvme_dev *dev) { int result = -ENOMEM; struct pci_dev *pdev = to_pci_dev(dev->dev); if (pci_enable_device_mem(pdev)) return result; pci_set_master(pdev); if (readl(dev->bar + NVME_REG_CSTS) == -1) { result = -ENODEV; goto disable; } /* * Some devices and/or platforms don't advertise or work with INTx * interrupts. Pre-enable a single MSIX or MSI vec for setup. We'll * adjust this later. */ result = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_ALL_TYPES); if (result < 0) goto disable; dev->ctrl.cap = lo_hi_readq(dev->bar + NVME_REG_CAP); dev->q_depth = min_t(u32, NVME_CAP_MQES(dev->ctrl.cap) + 1, io_queue_depth); dev->db_stride = 1 << NVME_CAP_STRIDE(dev->ctrl.cap); dev->dbs = dev->bar + 4096; /* * Some Apple controllers require a non-standard SQE size. * Interestingly they also seem to ignore the CC:IOSQES register * so we don't bother updating it here. */ if (dev->ctrl.quirks & NVME_QUIRK_128_BYTES_SQES) dev->io_sqes = 7; else dev->io_sqes = NVME_NVM_IOSQES; /* * Temporary fix for the Apple controller found in the MacBook8,1 and * some MacBook7,1 to avoid controller resets and data loss. */ if (pdev->vendor == PCI_VENDOR_ID_APPLE && pdev->device == 0x2001) { dev->q_depth = 2; dev_warn(dev->ctrl.device, "detected Apple NVMe controller, " "set queue depth=%u to work around controller resets\n", dev->q_depth); } else if (pdev->vendor == PCI_VENDOR_ID_SAMSUNG && (pdev->device == 0xa821 || pdev->device == 0xa822) && NVME_CAP_MQES(dev->ctrl.cap) == 0) { dev->q_depth = 64; dev_err(dev->ctrl.device, "detected PM1725 NVMe controller, " "set queue depth=%u\n", dev->q_depth); } /* * Controllers with the shared tags quirk need the IO queue to be * big enough so that we get 32 tags for the admin queue */ if ((dev->ctrl.quirks & NVME_QUIRK_SHARED_TAGS) && (dev->q_depth < (NVME_AQ_DEPTH + 2))) { dev->q_depth = NVME_AQ_DEPTH + 2; dev_warn(dev->ctrl.device, "IO queue depth clamped to %d\n", dev->q_depth); } dev->ctrl.sqsize = dev->q_depth - 1; /* 0's based queue depth */ nvme_map_cmb(dev); pci_save_state(pdev); result = nvme_pci_configure_admin_queue(dev); if (result) goto free_irq; return result; free_irq: pci_free_irq_vectors(pdev); disable: pci_disable_device(pdev); return result; } static void nvme_dev_unmap(struct nvme_dev *dev) { if (dev->bar) iounmap(dev->bar); pci_release_mem_regions(to_pci_dev(dev->dev)); } static bool nvme_pci_ctrl_is_dead(struct nvme_dev *dev) { struct pci_dev *pdev = to_pci_dev(dev->dev); u32 csts; if (!pci_is_enabled(pdev) || !pci_device_is_present(pdev)) return true; if (pdev->error_state != pci_channel_io_normal) return true; csts = readl(dev->bar + NVME_REG_CSTS); return (csts & NVME_CSTS_CFS) || !(csts & NVME_CSTS_RDY); } static void nvme_dev_disable(struct nvme_dev *dev, bool shutdown) { struct pci_dev *pdev = to_pci_dev(dev->dev); bool dead; mutex_lock(&dev->shutdown_lock); dead = nvme_pci_ctrl_is_dead(dev); if (dev->ctrl.state == NVME_CTRL_LIVE || dev->ctrl.state == 
NVME_CTRL_RESETTING) { if (pci_is_enabled(pdev)) nvme_start_freeze(&dev->ctrl); /* * Give the controller a chance to complete all entered requests * if doing a safe shutdown. */ if (!dead && shutdown) nvme_wait_freeze_timeout(&dev->ctrl, NVME_IO_TIMEOUT); } nvme_quiesce_io_queues(&dev->ctrl); if (!dead && dev->ctrl.queue_count > 0) { nvme_delete_io_queues(dev); nvme_disable_ctrl(&dev->ctrl, shutdown); nvme_poll_irqdisable(&dev->queues[0]); } nvme_suspend_io_queues(dev); nvme_suspend_queue(dev, 0); pci_free_irq_vectors(pdev); if (pci_is_enabled(pdev)) pci_disable_device(pdev); nvme_reap_pending_cqes(dev); nvme_cancel_tagset(&dev->ctrl); nvme_cancel_admin_tagset(&dev->ctrl); /* * The driver will not be starting up queues again if shutting down so * must flush all entered requests to their failed completion to avoid * deadlocking blk-mq hot-cpu notifier. */ if (shutdown) { nvme_unquiesce_io_queues(&dev->ctrl); if (dev->ctrl.admin_q && !blk_queue_dying(dev->ctrl.admin_q)) nvme_unquiesce_admin_queue(&dev->ctrl); } mutex_unlock(&dev->shutdown_lock); } static int nvme_disable_prepare_reset(struct nvme_dev *dev, bool shutdown) { if (!nvme_wait_reset(&dev->ctrl)) return -EBUSY; nvme_dev_disable(dev, shutdown); return 0; } static int nvme_setup_prp_pools(struct nvme_dev *dev) { dev->prp_page_pool = dma_pool_create("prp list page", dev->dev, NVME_CTRL_PAGE_SIZE, NVME_CTRL_PAGE_SIZE, 0); if (!dev->prp_page_pool) return -ENOMEM; /* Optimisation for I/Os between 4k and 128k */ dev->prp_small_pool = dma_pool_create("prp list 256", dev->dev, 256, 256, 0); if (!dev->prp_small_pool) { dma_pool_destroy(dev->prp_page_pool); return -ENOMEM; } return 0; } static void nvme_release_prp_pools(struct nvme_dev *dev) { dma_pool_destroy(dev->prp_page_pool); dma_pool_destroy(dev->prp_small_pool); } static int nvme_pci_alloc_iod_mempool(struct nvme_dev *dev) { size_t alloc_size = sizeof(struct scatterlist) * NVME_MAX_SEGS; dev->iod_mempool = mempool_create_node(1, mempool_kmalloc, mempool_kfree, (void *)alloc_size, GFP_KERNEL, dev_to_node(dev->dev)); if (!dev->iod_mempool) return -ENOMEM; return 0; } static void nvme_free_tagset(struct nvme_dev *dev) { if (dev->tagset.tags) nvme_remove_io_tag_set(&dev->ctrl); dev->ctrl.tagset = NULL; } /* pairs with nvme_pci_alloc_dev */ static void nvme_pci_free_ctrl(struct nvme_ctrl *ctrl) { struct nvme_dev *dev = to_nvme_dev(ctrl); nvme_free_tagset(dev); put_device(dev->dev); kfree(dev->queues); kfree(dev); } static void nvme_reset_work(struct work_struct *work) { struct nvme_dev *dev = container_of(work, struct nvme_dev, ctrl.reset_work); bool was_suspend = !!(dev->ctrl.ctrl_config & NVME_CC_SHN_NORMAL); int result; if (dev->ctrl.state != NVME_CTRL_RESETTING) { dev_warn(dev->ctrl.device, "ctrl state %d is not RESETTING\n", dev->ctrl.state); result = -ENODEV; goto out; } /* * If we're called to reset a live controller first shut it down before * moving on. */ if (dev->ctrl.ctrl_config & NVME_CC_ENABLE) nvme_dev_disable(dev, false); nvme_sync_queues(&dev->ctrl); mutex_lock(&dev->shutdown_lock); result = nvme_pci_enable(dev); if (result) goto out_unlock; nvme_unquiesce_admin_queue(&dev->ctrl); mutex_unlock(&dev->shutdown_lock); /* * Introduce CONNECTING state from nvme-fc/rdma transports to mark the * initializing procedure here. 
*/ if (!nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_CONNECTING)) { dev_warn(dev->ctrl.device, "failed to mark controller CONNECTING\n"); result = -EBUSY; goto out; } result = nvme_init_ctrl_finish(&dev->ctrl, was_suspend); if (result) goto out; nvme_dbbuf_dma_alloc(dev); result = nvme_setup_host_mem(dev); if (result < 0) goto out; result = nvme_setup_io_queues(dev); if (result) goto out; /* * Freeze and update the number of I/O queues as thos might have * changed. If there are no I/O queues left after this reset, keep the * controller around but remove all namespaces. */ if (dev->online_queues > 1) { nvme_unquiesce_io_queues(&dev->ctrl); nvme_wait_freeze(&dev->ctrl); nvme_pci_update_nr_queues(dev); nvme_dbbuf_set(dev); nvme_unfreeze(&dev->ctrl); } else { dev_warn(dev->ctrl.device, "IO queues lost\n"); nvme_mark_namespaces_dead(&dev->ctrl); nvme_unquiesce_io_queues(&dev->ctrl); nvme_remove_namespaces(&dev->ctrl); nvme_free_tagset(dev); } /* * If only admin queue live, keep it to do further investigation or * recovery. */ if (!nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_LIVE)) { dev_warn(dev->ctrl.device, "failed to mark controller live state\n"); result = -ENODEV; goto out; } nvme_start_ctrl(&dev->ctrl); return; out_unlock: mutex_unlock(&dev->shutdown_lock); out: /* * Set state to deleting now to avoid blocking nvme_wait_reset(), which * may be holding this pci_dev's device lock. */ dev_warn(dev->ctrl.device, "Disabling device after reset failure: %d\n", result); nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_DELETING); nvme_dev_disable(dev, true); nvme_sync_queues(&dev->ctrl); nvme_mark_namespaces_dead(&dev->ctrl); nvme_unquiesce_io_queues(&dev->ctrl); nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_DEAD); } static int nvme_pci_reg_read32(struct nvme_ctrl *ctrl, u32 off, u32 *val) { *val = readl(to_nvme_dev(ctrl)->bar + off); return 0; } static int nvme_pci_reg_write32(struct nvme_ctrl *ctrl, u32 off, u32 val) { writel(val, to_nvme_dev(ctrl)->bar + off); return 0; } static int nvme_pci_reg_read64(struct nvme_ctrl *ctrl, u32 off, u64 *val) { *val = lo_hi_readq(to_nvme_dev(ctrl)->bar + off); return 0; } static int nvme_pci_get_address(struct nvme_ctrl *ctrl, char *buf, int size) { struct pci_dev *pdev = to_pci_dev(to_nvme_dev(ctrl)->dev); return snprintf(buf, size, "%s\n", dev_name(&pdev->dev)); } static void nvme_pci_print_device_info(struct nvme_ctrl *ctrl) { struct pci_dev *pdev = to_pci_dev(to_nvme_dev(ctrl)->dev); struct nvme_subsystem *subsys = ctrl->subsys; dev_err(ctrl->device, "VID:DID %04x:%04x model:%.*s firmware:%.*s\n", pdev->vendor, pdev->device, nvme_strlen(subsys->model, sizeof(subsys->model)), subsys->model, nvme_strlen(subsys->firmware_rev, sizeof(subsys->firmware_rev)), subsys->firmware_rev); } static bool nvme_pci_supports_pci_p2pdma(struct nvme_ctrl *ctrl) { struct nvme_dev *dev = to_nvme_dev(ctrl); return dma_pci_p2pdma_supported(dev->dev); } static const struct nvme_ctrl_ops nvme_pci_ctrl_ops = { .name = "pcie", .module = THIS_MODULE, .flags = NVME_F_METADATA_SUPPORTED, .dev_attr_groups = nvme_pci_dev_attr_groups, .reg_read32 = nvme_pci_reg_read32, .reg_write32 = nvme_pci_reg_write32, .reg_read64 = nvme_pci_reg_read64, .free_ctrl = nvme_pci_free_ctrl, .submit_async_event = nvme_pci_submit_async_event, .get_address = nvme_pci_get_address, .print_device_info = nvme_pci_print_device_info, .supports_pci_p2pdma = nvme_pci_supports_pci_p2pdma, }; static int nvme_dev_map(struct nvme_dev *dev) { struct pci_dev *pdev = to_pci_dev(dev->dev); if (pci_request_mem_regions(pdev, 
"nvme")) return -ENODEV; if (nvme_remap_bar(dev, NVME_REG_DBS + 4096)) goto release; return 0; release: pci_release_mem_regions(pdev); return -ENODEV; } static unsigned long check_vendor_combination_bug(struct pci_dev *pdev) { if (pdev->vendor == 0x144d && pdev->device == 0xa802) { /* * Several Samsung devices seem to drop off the PCIe bus * randomly when APST is on and uses the deepest sleep state. * This has been observed on a Samsung "SM951 NVMe SAMSUNG * 256GB", a "PM951 NVMe SAMSUNG 512GB", and a "Samsung SSD * 950 PRO 256GB", but it seems to be restricted to two Dell * laptops. */ if (dmi_match(DMI_SYS_VENDOR, "Dell Inc.") && (dmi_match(DMI_PRODUCT_NAME, "XPS 15 9550") || dmi_match(DMI_PRODUCT_NAME, "Precision 5510"))) return NVME_QUIRK_NO_DEEPEST_PS; } else if (pdev->vendor == 0x144d && pdev->device == 0xa804) { /* * Samsung SSD 960 EVO drops off the PCIe bus after system * suspend on a Ryzen board, ASUS PRIME B350M-A, as well as * within few minutes after bootup on a Coffee Lake board - * ASUS PRIME Z370-A */ if (dmi_match(DMI_BOARD_VENDOR, "ASUSTeK COMPUTER INC.") && (dmi_match(DMI_BOARD_NAME, "PRIME B350M-A") || dmi_match(DMI_BOARD_NAME, "PRIME Z370-A"))) return NVME_QUIRK_NO_APST; } else if ((pdev->vendor == 0x144d && (pdev->device == 0xa801 || pdev->device == 0xa808 || pdev->device == 0xa809)) || (pdev->vendor == 0x1e0f && pdev->device == 0x0001)) { /* * Forcing to use host managed nvme power settings for * lowest idle power with quick resume latency on * Samsung and Toshiba SSDs based on suspend behavior * on Coffee Lake board for LENOVO C640 */ if ((dmi_match(DMI_BOARD_VENDOR, "LENOVO")) && dmi_match(DMI_BOARD_NAME, "LNVNB161216")) return NVME_QUIRK_SIMPLE_SUSPEND; } return 0; } static struct nvme_dev *nvme_pci_alloc_dev(struct pci_dev *pdev, const struct pci_device_id *id) { unsigned long quirks = id->driver_data; int node = dev_to_node(&pdev->dev); struct nvme_dev *dev; int ret = -ENOMEM; dev = kzalloc_node(sizeof(*dev), GFP_KERNEL, node); if (!dev) return ERR_PTR(-ENOMEM); INIT_WORK(&dev->ctrl.reset_work, nvme_reset_work); mutex_init(&dev->shutdown_lock); dev->nr_write_queues = write_queues; dev->nr_poll_queues = poll_queues; dev->nr_allocated_queues = nvme_max_io_queues(dev) + 1; dev->queues = kcalloc_node(dev->nr_allocated_queues, sizeof(struct nvme_queue), GFP_KERNEL, node); if (!dev->queues) goto out_free_dev; dev->dev = get_device(&pdev->dev); quirks |= check_vendor_combination_bug(pdev); if (!noacpi && acpi_storage_d3(&pdev->dev)) { /* * Some systems use a bios work around to ask for D3 on * platforms that support kernel managed suspend. */ dev_info(&pdev->dev, "platform quirk: setting simple suspend\n"); quirks |= NVME_QUIRK_SIMPLE_SUSPEND; } ret = nvme_init_ctrl(&dev->ctrl, &pdev->dev, &nvme_pci_ctrl_ops, quirks); if (ret) goto out_put_device; if (dev->ctrl.quirks & NVME_QUIRK_DMA_ADDRESS_BITS_48) dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(48)); else dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); dma_set_min_align_mask(&pdev->dev, NVME_CTRL_PAGE_SIZE - 1); dma_set_max_seg_size(&pdev->dev, 0xffffffff); /* * Limit the max command size to prevent iod->sg allocations going * over a single page. */ dev->ctrl.max_hw_sectors = min_t(u32, NVME_MAX_KB_SZ << 1, dma_opt_mapping_size(&pdev->dev) >> 9); dev->ctrl.max_segments = NVME_MAX_SEGS; /* * There is no support for SGLs for metadata (yet), so we are limited to * a single integrity segment for the separate metadata pointer. 
*/ dev->ctrl.max_integrity_segments = 1; return dev; out_put_device: put_device(dev->dev); kfree(dev->queues); out_free_dev: kfree(dev); return ERR_PTR(ret); } static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id) { struct nvme_dev *dev; int result = -ENOMEM; dev = nvme_pci_alloc_dev(pdev, id); if (IS_ERR(dev)) return PTR_ERR(dev); result = nvme_dev_map(dev); if (result) goto out_uninit_ctrl; result = nvme_setup_prp_pools(dev); if (result) goto out_dev_unmap; result = nvme_pci_alloc_iod_mempool(dev); if (result) goto out_release_prp_pools; dev_info(dev->ctrl.device, "pci function %s\n", dev_name(&pdev->dev)); result = nvme_pci_enable(dev); if (result) goto out_release_iod_mempool; result = nvme_alloc_admin_tag_set(&dev->ctrl, &dev->admin_tagset, &nvme_mq_admin_ops, sizeof(struct nvme_iod)); if (result) goto out_disable; /* * Mark the controller as connecting before sending admin commands to * allow the timeout handler to do the right thing. */ if (!nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_CONNECTING)) { dev_warn(dev->ctrl.device, "failed to mark controller CONNECTING\n"); result = -EBUSY; goto out_disable; } result = nvme_init_ctrl_finish(&dev->ctrl, false); if (result) goto out_disable; nvme_dbbuf_dma_alloc(dev); result = nvme_setup_host_mem(dev); if (result < 0) goto out_disable; result = nvme_setup_io_queues(dev); if (result) goto out_disable; if (dev->online_queues > 1) { nvme_alloc_io_tag_set(&dev->ctrl, &dev->tagset, &nvme_mq_ops, nvme_pci_nr_maps(dev), sizeof(struct nvme_iod)); nvme_dbbuf_set(dev); } if (!dev->ctrl.tagset) dev_warn(dev->ctrl.device, "IO queues not created\n"); if (!nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_LIVE)) { dev_warn(dev->ctrl.device, "failed to mark controller live state\n"); result = -ENODEV; goto out_disable; } pci_set_drvdata(pdev, dev); nvme_start_ctrl(&dev->ctrl); nvme_put_ctrl(&dev->ctrl); flush_work(&dev->ctrl.scan_work); return 0; out_disable: nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_DELETING); nvme_dev_disable(dev, true); nvme_free_host_mem(dev); nvme_dev_remove_admin(dev); nvme_dbbuf_dma_free(dev); nvme_free_queues(dev, 0); out_release_iod_mempool: mempool_destroy(dev->iod_mempool); out_release_prp_pools: nvme_release_prp_pools(dev); out_dev_unmap: nvme_dev_unmap(dev); out_uninit_ctrl: nvme_uninit_ctrl(&dev->ctrl); nvme_put_ctrl(&dev->ctrl); return result; } static void nvme_reset_prepare(struct pci_dev *pdev) { struct nvme_dev *dev = pci_get_drvdata(pdev); /* * We don't need to check the return value from waiting for the reset * state as pci_dev device lock is held, making it impossible to race * with ->remove(). */ nvme_disable_prepare_reset(dev, false); nvme_sync_queues(&dev->ctrl); } static void nvme_reset_done(struct pci_dev *pdev) { struct nvme_dev *dev = pci_get_drvdata(pdev); if (!nvme_try_sched_reset(&dev->ctrl)) flush_work(&dev->ctrl.reset_work); } static void nvme_shutdown(struct pci_dev *pdev) { struct nvme_dev *dev = pci_get_drvdata(pdev); nvme_disable_prepare_reset(dev, true); } /* * The driver's remove may be called on a device in a partially initialized * state. This function must not have any dependencies on the device state in * order to proceed. 
*/ static void nvme_remove(struct pci_dev *pdev) { struct nvme_dev *dev = pci_get_drvdata(pdev); nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_DELETING); pci_set_drvdata(pdev, NULL); if (!pci_device_is_present(pdev)) { nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_DEAD); nvme_dev_disable(dev, true); } flush_work(&dev->ctrl.reset_work); nvme_stop_ctrl(&dev->ctrl); nvme_remove_namespaces(&dev->ctrl); nvme_dev_disable(dev, true); nvme_free_host_mem(dev); nvme_dev_remove_admin(dev); nvme_dbbuf_dma_free(dev); nvme_free_queues(dev, 0); mempool_destroy(dev->iod_mempool); nvme_release_prp_pools(dev); nvme_dev_unmap(dev); nvme_uninit_ctrl(&dev->ctrl); } #ifdef CONFIG_PM_SLEEP static int nvme_get_power_state(struct nvme_ctrl *ctrl, u32 *ps) { return nvme_get_features(ctrl, NVME_FEAT_POWER_MGMT, 0, NULL, 0, ps); } static int nvme_set_power_state(struct nvme_ctrl *ctrl, u32 ps) { return nvme_set_features(ctrl, NVME_FEAT_POWER_MGMT, ps, NULL, 0, NULL); } static int nvme_resume(struct device *dev) { struct nvme_dev *ndev = pci_get_drvdata(to_pci_dev(dev)); struct nvme_ctrl *ctrl = &ndev->ctrl; if (ndev->last_ps == U32_MAX || nvme_set_power_state(ctrl, ndev->last_ps) != 0) goto reset; if (ctrl->hmpre && nvme_setup_host_mem(ndev)) goto reset; return 0; reset: return nvme_try_sched_reset(ctrl); } static int nvme_suspend(struct device *dev) { struct pci_dev *pdev = to_pci_dev(dev); struct nvme_dev *ndev = pci_get_drvdata(pdev); struct nvme_ctrl *ctrl = &ndev->ctrl; int ret = -EBUSY; ndev->last_ps = U32_MAX; /* * The platform does not remove power for a kernel managed suspend so * use host managed nvme power settings for lowest idle power if * possible. This should have quicker resume latency than a full device * shutdown. But if the firmware is involved after the suspend or the * device does not support any non-default power states, shut down the * device fully. * * If ASPM is not enabled for the device, shut down the device and allow * the PCI bus layer to put it into D3 in order to take the PCIe link * down, so as to allow the platform to achieve its minimum low-power * state (which may not be possible if the link is up). */ if (pm_suspend_via_firmware() || !ctrl->npss || !pcie_aspm_enabled(pdev) || (ndev->ctrl.quirks & NVME_QUIRK_SIMPLE_SUSPEND)) return nvme_disable_prepare_reset(ndev, true); nvme_start_freeze(ctrl); nvme_wait_freeze(ctrl); nvme_sync_queues(ctrl); if (ctrl->state != NVME_CTRL_LIVE) goto unfreeze; /* * Host memory access may not be successful in a system suspend state, * but the specification allows the controller to access memory in a * non-operational power state. */ if (ndev->hmb) { ret = nvme_set_host_mem(ndev, 0); if (ret < 0) goto unfreeze; } ret = nvme_get_power_state(ctrl, &ndev->last_ps); if (ret < 0) goto unfreeze; /* * A saved state prevents pci pm from generically controlling the * device's power. If we're using protocol specific settings, we don't * want pci interfering. */ pci_save_state(pdev); ret = nvme_set_power_state(ctrl, ctrl->npss); if (ret < 0) goto unfreeze; if (ret) { /* discard the saved state */ pci_load_saved_state(pdev, NULL); /* * Clearing npss forces a controller reset on resume. The * correct value will be rediscovered then. 
*/ ret = nvme_disable_prepare_reset(ndev, true); ctrl->npss = 0; } unfreeze: nvme_unfreeze(ctrl); return ret; } static int nvme_simple_suspend(struct device *dev) { struct nvme_dev *ndev = pci_get_drvdata(to_pci_dev(dev)); return nvme_disable_prepare_reset(ndev, true); } static int nvme_simple_resume(struct device *dev) { struct pci_dev *pdev = to_pci_dev(dev); struct nvme_dev *ndev = pci_get_drvdata(pdev); return nvme_try_sched_reset(&ndev->ctrl); } static const struct dev_pm_ops nvme_dev_pm_ops = { .suspend = nvme_suspend, .resume = nvme_resume, .freeze = nvme_simple_suspend, .thaw = nvme_simple_resume, .poweroff = nvme_simple_suspend, .restore = nvme_simple_resume, }; #endif /* CONFIG_PM_SLEEP */ static pci_ers_result_t nvme_error_detected(struct pci_dev *pdev, pci_channel_state_t state) { struct nvme_dev *dev = pci_get_drvdata(pdev); /* * A frozen channel requires a reset. When detected, this method will * shutdown the controller to quiesce. The controller will be restarted * after the slot reset through driver's slot_reset callback. */ switch (state) { case pci_channel_io_normal: return PCI_ERS_RESULT_CAN_RECOVER; case pci_channel_io_frozen: dev_warn(dev->ctrl.device, "frozen state error detected, reset controller\n"); if (!nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_RESETTING)) { nvme_dev_disable(dev, true); return PCI_ERS_RESULT_DISCONNECT; } nvme_dev_disable(dev, false); return PCI_ERS_RESULT_NEED_RESET; case pci_channel_io_perm_failure: dev_warn(dev->ctrl.device, "failure state error detected, request disconnect\n"); return PCI_ERS_RESULT_DISCONNECT; } return PCI_ERS_RESULT_NEED_RESET; } static pci_ers_result_t nvme_slot_reset(struct pci_dev *pdev) { struct nvme_dev *dev = pci_get_drvdata(pdev); dev_info(dev->ctrl.device, "restart after slot reset\n"); pci_restore_state(pdev); if (!nvme_try_sched_reset(&dev->ctrl)) nvme_unquiesce_io_queues(&dev->ctrl); return PCI_ERS_RESULT_RECOVERED; } static void nvme_error_resume(struct pci_dev *pdev) { struct nvme_dev *dev = pci_get_drvdata(pdev); flush_work(&dev->ctrl.reset_work); } static const struct pci_error_handlers nvme_err_handler = { .error_detected = nvme_error_detected, .slot_reset = nvme_slot_reset, .resume = nvme_error_resume, .reset_prepare = nvme_reset_prepare, .reset_done = nvme_reset_done, }; static const struct pci_device_id nvme_id_table[] = { { PCI_VDEVICE(INTEL, 0x0953), /* Intel 750/P3500/P3600/P3700 */ .driver_data = NVME_QUIRK_STRIPE_SIZE | NVME_QUIRK_DEALLOCATE_ZEROES, }, { PCI_VDEVICE(INTEL, 0x0a53), /* Intel P3520 */ .driver_data = NVME_QUIRK_STRIPE_SIZE | NVME_QUIRK_DEALLOCATE_ZEROES, }, { PCI_VDEVICE(INTEL, 0x0a54), /* Intel P4500/P4600 */ .driver_data = NVME_QUIRK_STRIPE_SIZE | NVME_QUIRK_DEALLOCATE_ZEROES | NVME_QUIRK_IGNORE_DEV_SUBNQN, }, { PCI_VDEVICE(INTEL, 0x0a55), /* Dell Express Flash P4600 */ .driver_data = NVME_QUIRK_STRIPE_SIZE | NVME_QUIRK_DEALLOCATE_ZEROES, }, { PCI_VDEVICE(INTEL, 0xf1a5), /* Intel 600P/P3100 */ .driver_data = NVME_QUIRK_NO_DEEPEST_PS | NVME_QUIRK_MEDIUM_PRIO_SQ | NVME_QUIRK_NO_TEMP_THRESH_CHANGE | NVME_QUIRK_DISABLE_WRITE_ZEROES, }, { PCI_VDEVICE(INTEL, 0xf1a6), /* Intel 760p/Pro 7600p */ .driver_data = NVME_QUIRK_IGNORE_DEV_SUBNQN, }, { PCI_VDEVICE(INTEL, 0x5845), /* Qemu emulated controller */ .driver_data = NVME_QUIRK_IDENTIFY_CNS | NVME_QUIRK_DISABLE_WRITE_ZEROES | NVME_QUIRK_BOGUS_NID, }, { PCI_VDEVICE(REDHAT, 0x0010), /* Qemu emulated controller */ .driver_data = NVME_QUIRK_BOGUS_NID, }, { PCI_DEVICE(0x126f, 0x2263), /* Silicon Motion unidentified */ .driver_data = 
NVME_QUIRK_NO_NS_DESC_LIST | NVME_QUIRK_BOGUS_NID, }, { PCI_DEVICE(0x1bb1, 0x0100), /* Seagate Nytro Flash Storage */ .driver_data = NVME_QUIRK_DELAY_BEFORE_CHK_RDY | NVME_QUIRK_NO_NS_DESC_LIST, }, { PCI_DEVICE(0x1c58, 0x0003), /* HGST adapter */ .driver_data = NVME_QUIRK_DELAY_BEFORE_CHK_RDY, }, { PCI_DEVICE(0x1c58, 0x0023), /* WDC SN200 adapter */ .driver_data = NVME_QUIRK_DELAY_BEFORE_CHK_RDY, }, { PCI_DEVICE(0x1c5f, 0x0540), /* Memblaze Pblaze4 adapter */ .driver_data = NVME_QUIRK_DELAY_BEFORE_CHK_RDY, }, { PCI_DEVICE(0x144d, 0xa821), /* Samsung PM1725 */ .driver_data = NVME_QUIRK_DELAY_BEFORE_CHK_RDY, }, { PCI_DEVICE(0x144d, 0xa822), /* Samsung PM1725a */ .driver_data = NVME_QUIRK_DELAY_BEFORE_CHK_RDY | NVME_QUIRK_DISABLE_WRITE_ZEROES| NVME_QUIRK_IGNORE_DEV_SUBNQN, }, { PCI_DEVICE(0x1987, 0x5012), /* Phison E12 */ .driver_data = NVME_QUIRK_BOGUS_NID, }, { PCI_DEVICE(0x1987, 0x5016), /* Phison E16 */ .driver_data = NVME_QUIRK_IGNORE_DEV_SUBNQN | NVME_QUIRK_BOGUS_NID, }, { PCI_DEVICE(0x1987, 0x5019), /* phison E19 */ .driver_data = NVME_QUIRK_DISABLE_WRITE_ZEROES, }, { PCI_DEVICE(0x1987, 0x5021), /* Phison E21 */ .driver_data = NVME_QUIRK_DISABLE_WRITE_ZEROES, }, { PCI_DEVICE(0x1b4b, 0x1092), /* Lexar 256 GB SSD */ .driver_data = NVME_QUIRK_NO_NS_DESC_LIST | NVME_QUIRK_IGNORE_DEV_SUBNQN, }, { PCI_DEVICE(0x1cc1, 0x33f8), /* ADATA IM2P33F8ABR1 1 TB */ .driver_data = NVME_QUIRK_BOGUS_NID, }, { PCI_DEVICE(0x10ec, 0x5762), /* ADATA SX6000LNP */ .driver_data = NVME_QUIRK_IGNORE_DEV_SUBNQN | NVME_QUIRK_BOGUS_NID, }, { PCI_DEVICE(0x10ec, 0x5763), /* ADATA SX6000PNP */ .driver_data = NVME_QUIRK_BOGUS_NID, }, { PCI_DEVICE(0x1cc1, 0x8201), /* ADATA SX8200PNP 512GB */ .driver_data = NVME_QUIRK_NO_DEEPEST_PS | NVME_QUIRK_IGNORE_DEV_SUBNQN, }, { PCI_DEVICE(0x1344, 0x5407), /* Micron Technology Inc NVMe SSD */ .driver_data = NVME_QUIRK_IGNORE_DEV_SUBNQN }, { PCI_DEVICE(0x1344, 0x6001), /* Micron Nitro NVMe */ .driver_data = NVME_QUIRK_BOGUS_NID, }, { PCI_DEVICE(0x1c5c, 0x1504), /* SK Hynix PC400 */ .driver_data = NVME_QUIRK_DISABLE_WRITE_ZEROES, }, { PCI_DEVICE(0x1c5c, 0x174a), /* SK Hynix P31 SSD */ .driver_data = NVME_QUIRK_BOGUS_NID, }, { PCI_DEVICE(0x15b7, 0x2001), /* Sandisk Skyhawk */ .driver_data = NVME_QUIRK_DISABLE_WRITE_ZEROES, }, { PCI_DEVICE(0x1d97, 0x2263), /* SPCC */ .driver_data = NVME_QUIRK_DISABLE_WRITE_ZEROES, }, { PCI_DEVICE(0x144d, 0xa80b), /* Samsung PM9B1 256G and 512G */ .driver_data = NVME_QUIRK_DISABLE_WRITE_ZEROES | NVME_QUIRK_BOGUS_NID, }, { PCI_DEVICE(0x144d, 0xa809), /* Samsung MZALQ256HBJD 256G */ .driver_data = NVME_QUIRK_DISABLE_WRITE_ZEROES, }, { PCI_DEVICE(0x144d, 0xa802), /* Samsung SM953 */ .driver_data = NVME_QUIRK_BOGUS_NID, }, { PCI_DEVICE(0x1cc4, 0x6303), /* UMIS RPJTJ512MGE1QDY 512G */ .driver_data = NVME_QUIRK_DISABLE_WRITE_ZEROES, }, { PCI_DEVICE(0x1cc4, 0x6302), /* UMIS RPJTJ256MGE1QDY 256G */ .driver_data = NVME_QUIRK_DISABLE_WRITE_ZEROES, }, { PCI_DEVICE(0x2646, 0x2262), /* KINGSTON SKC2000 NVMe SSD */ .driver_data = NVME_QUIRK_NO_DEEPEST_PS, }, { PCI_DEVICE(0x2646, 0x2263), /* KINGSTON A2000 NVMe SSD */ .driver_data = NVME_QUIRK_NO_DEEPEST_PS, }, { PCI_DEVICE(0x2646, 0x5013), /* Kingston KC3000, Kingston FURY Renegade */ .driver_data = NVME_QUIRK_NO_SECONDARY_TEMP_THRESH, }, { PCI_DEVICE(0x2646, 0x5018), /* KINGSTON OM8SFP4xxxxP OS21012 NVMe SSD */ .driver_data = NVME_QUIRK_DISABLE_WRITE_ZEROES, }, { PCI_DEVICE(0x2646, 0x5016), /* KINGSTON OM3PGP4xxxxP OS21011 NVMe SSD */ .driver_data = NVME_QUIRK_DISABLE_WRITE_ZEROES, }, { PCI_DEVICE(0x2646, 0x501A), /* 
KINGSTON OM8PGP4xxxxP OS21005 NVMe SSD */ .driver_data = NVME_QUIRK_DISABLE_WRITE_ZEROES, }, { PCI_DEVICE(0x2646, 0x501B), /* KINGSTON OM8PGP4xxxxQ OS21005 NVMe SSD */ .driver_data = NVME_QUIRK_DISABLE_WRITE_ZEROES, }, { PCI_DEVICE(0x2646, 0x501E), /* KINGSTON OM3PGP4xxxxQ OS21011 NVMe SSD */ .driver_data = NVME_QUIRK_DISABLE_WRITE_ZEROES, }, { PCI_DEVICE(0x1f40, 0x1202), /* Netac Technologies Co. NV3000 NVMe SSD */ .driver_data = NVME_QUIRK_BOGUS_NID, }, { PCI_DEVICE(0x1f40, 0x5236), /* Netac Technologies Co. NV7000 NVMe SSD */ .driver_data = NVME_QUIRK_BOGUS_NID, }, { PCI_DEVICE(0x1e4B, 0x1001), /* MAXIO MAP1001 */ .driver_data = NVME_QUIRK_BOGUS_NID, }, { PCI_DEVICE(0x1e4B, 0x1002), /* MAXIO MAP1002 */ .driver_data = NVME_QUIRK_BOGUS_NID, }, { PCI_DEVICE(0x1e4B, 0x1202), /* MAXIO MAP1202 */ .driver_data = NVME_QUIRK_BOGUS_NID, }, { PCI_DEVICE(0x1e4B, 0x1602), /* MAXIO MAP1602 */ .driver_data = NVME_QUIRK_BOGUS_NID, }, { PCI_DEVICE(0x1cc1, 0x5350), /* ADATA XPG GAMMIX S50 */ .driver_data = NVME_QUIRK_BOGUS_NID, }, { PCI_DEVICE(0x1dbe, 0x5236), /* ADATA XPG GAMMIX S70 */ .driver_data = NVME_QUIRK_BOGUS_NID, }, { PCI_DEVICE(0x1e49, 0x0021), /* ZHITAI TiPro5000 NVMe SSD */ .driver_data = NVME_QUIRK_NO_DEEPEST_PS, }, { PCI_DEVICE(0x1e49, 0x0041), /* ZHITAI TiPro7000 NVMe SSD */ .driver_data = NVME_QUIRK_NO_DEEPEST_PS, }, { PCI_DEVICE(0xc0a9, 0x540a), /* Crucial P2 */ .driver_data = NVME_QUIRK_BOGUS_NID, }, { PCI_DEVICE(0x1d97, 0x2263), /* Lexar NM610 */ .driver_data = NVME_QUIRK_BOGUS_NID, }, { PCI_DEVICE(0x1d97, 0x1d97), /* Lexar NM620 */ .driver_data = NVME_QUIRK_BOGUS_NID, }, { PCI_DEVICE(0x1d97, 0x2269), /* Lexar NM760 */ .driver_data = NVME_QUIRK_BOGUS_NID | NVME_QUIRK_IGNORE_DEV_SUBNQN, }, { PCI_DEVICE(0x10ec, 0x5763), /* TEAMGROUP T-FORCE CARDEA ZERO Z330 SSD */ .driver_data = NVME_QUIRK_BOGUS_NID, }, { PCI_DEVICE(0x1e4b, 0x1602), /* HS-SSD-FUTURE 2048G */ .driver_data = NVME_QUIRK_BOGUS_NID, }, { PCI_DEVICE(0x10ec, 0x5765), /* TEAMGROUP MP33 2TB SSD */ .driver_data = NVME_QUIRK_BOGUS_NID, }, { PCI_DEVICE(PCI_VENDOR_ID_AMAZON, 0x0061), .driver_data = NVME_QUIRK_DMA_ADDRESS_BITS_48, }, { PCI_DEVICE(PCI_VENDOR_ID_AMAZON, 0x0065), .driver_data = NVME_QUIRK_DMA_ADDRESS_BITS_48, }, { PCI_DEVICE(PCI_VENDOR_ID_AMAZON, 0x8061), .driver_data = NVME_QUIRK_DMA_ADDRESS_BITS_48, }, { PCI_DEVICE(PCI_VENDOR_ID_AMAZON, 0xcd00), .driver_data = NVME_QUIRK_DMA_ADDRESS_BITS_48, }, { PCI_DEVICE(PCI_VENDOR_ID_AMAZON, 0xcd01), .driver_data = NVME_QUIRK_DMA_ADDRESS_BITS_48, }, { PCI_DEVICE(PCI_VENDOR_ID_AMAZON, 0xcd02), .driver_data = NVME_QUIRK_DMA_ADDRESS_BITS_48, }, { PCI_DEVICE(PCI_VENDOR_ID_APPLE, 0x2001), .driver_data = NVME_QUIRK_SINGLE_VECTOR }, { PCI_DEVICE(PCI_VENDOR_ID_APPLE, 0x2003) }, { PCI_DEVICE(PCI_VENDOR_ID_APPLE, 0x2005), .driver_data = NVME_QUIRK_SINGLE_VECTOR | NVME_QUIRK_128_BYTES_SQES | NVME_QUIRK_SHARED_TAGS | NVME_QUIRK_SKIP_CID_GEN | NVME_QUIRK_IDENTIFY_CNS }, { PCI_DEVICE_CLASS(PCI_CLASS_STORAGE_EXPRESS, 0xffffff) }, { 0, } }; MODULE_DEVICE_TABLE(pci, nvme_id_table); static struct pci_driver nvme_driver = { .name = "nvme", .id_table = nvme_id_table, .probe = nvme_probe, .remove = nvme_remove, .shutdown = nvme_shutdown, .driver = { .probe_type = PROBE_PREFER_ASYNCHRONOUS, #ifdef CONFIG_PM_SLEEP .pm = &nvme_dev_pm_ops, #endif }, .sriov_configure = pci_sriov_configure_simple, .err_handler = &nvme_err_handler, }; static int __init nvme_init(void) { BUILD_BUG_ON(sizeof(struct nvme_create_cq) != 64); BUILD_BUG_ON(sizeof(struct nvme_create_sq) != 64); BUILD_BUG_ON(sizeof(struct 
nvme_delete_queue) != 64); BUILD_BUG_ON(IRQ_AFFINITY_MAX_SETS < 2); BUILD_BUG_ON(NVME_MAX_SEGS > SGES_PER_PAGE); BUILD_BUG_ON(sizeof(struct scatterlist) * NVME_MAX_SEGS > PAGE_SIZE); BUILD_BUG_ON(nvme_pci_npages_prp() > NVME_MAX_NR_ALLOCATIONS); return pci_register_driver(&nvme_driver); } static void __exit nvme_exit(void) { pci_unregister_driver(&nvme_driver); flush_workqueue(nvme_wq); } MODULE_AUTHOR("Matthew Wilcox <[email protected]>"); MODULE_LICENSE("GPL"); MODULE_VERSION("1.0"); module_init(nvme_init); module_exit(nvme_exit);
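/*
 * Editor's illustrative sketch (added, not part of the original driver):
 * the doorbell layout arithmetic used by db_bar_size() and
 * nvme_init_queue() above, written out as standalone helpers.  Offsets are
 * in bytes from BAR0; the doorbell region starts at NVME_REG_DBS (0x1000,
 * cf. "dev->dbs = dev->bar + 4096") and "stride" is dev->db_stride
 * (1 << CAP.DSTRD), the number of 32-bit slots between consecutive
 * doorbells.  Helper names are invented for this example.
 */
static __maybe_unused unsigned long example_sq_doorbell_offset(unsigned int qid,
							       unsigned int stride)
{
	/* Each queue owns an SQ tail and a CQ head doorbell, 4 bytes each. */
	return 0x1000 + (unsigned long)qid * 2 * stride * 4;
}

static __maybe_unused unsigned long example_cq_doorbell_offset(unsigned int qid,
							       unsigned int stride)
{
	/* The CQ head doorbell follows the SQ tail doorbell by one stride. */
	return example_sq_doorbell_offset(qid, stride) + stride * 4;
}
/*
 * With the usual stride of 1 this gives 0x1000/0x1004 for the admin queue
 * and 0x1008/0x100c for I/O queue 1, which is why db_bar_size() maps
 * NVME_REG_DBS + (nr_io_queues + 1) * 8 * db_stride bytes of BAR0.
 */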
linux-master
drivers/nvme/host/pci.c
// SPDX-License-Identifier: GPL-2.0 /* * Apple ANS NVM Express device driver * Copyright The Asahi Linux Contributors * * Based on the pci.c NVM Express device driver * Copyright (c) 2011-2014, Intel Corporation. * and on the rdma.c NVMe over Fabrics RDMA host code. * Copyright (c) 2015-2016 HGST, a Western Digital Company. */ #include <linux/async.h> #include <linux/blkdev.h> #include <linux/blk-mq.h> #include <linux/device.h> #include <linux/dma-mapping.h> #include <linux/dmapool.h> #include <linux/interrupt.h> #include <linux/io-64-nonatomic-lo-hi.h> #include <linux/io.h> #include <linux/iopoll.h> #include <linux/jiffies.h> #include <linux/mempool.h> #include <linux/module.h> #include <linux/of.h> #include <linux/of_platform.h> #include <linux/once.h> #include <linux/platform_device.h> #include <linux/pm_domain.h> #include <linux/soc/apple/rtkit.h> #include <linux/soc/apple/sart.h> #include <linux/reset.h> #include <linux/time64.h> #include "nvme.h" #define APPLE_ANS_BOOT_TIMEOUT USEC_PER_SEC #define APPLE_ANS_MAX_QUEUE_DEPTH 64 #define APPLE_ANS_COPROC_CPU_CONTROL 0x44 #define APPLE_ANS_COPROC_CPU_CONTROL_RUN BIT(4) #define APPLE_ANS_ACQ_DB 0x1004 #define APPLE_ANS_IOCQ_DB 0x100c #define APPLE_ANS_MAX_PEND_CMDS_CTRL 0x1210 #define APPLE_ANS_BOOT_STATUS 0x1300 #define APPLE_ANS_BOOT_STATUS_OK 0xde71ce55 #define APPLE_ANS_UNKNOWN_CTRL 0x24008 #define APPLE_ANS_PRP_NULL_CHECK BIT(11) #define APPLE_ANS_LINEAR_SQ_CTRL 0x24908 #define APPLE_ANS_LINEAR_SQ_EN BIT(0) #define APPLE_ANS_LINEAR_ASQ_DB 0x2490c #define APPLE_ANS_LINEAR_IOSQ_DB 0x24910 #define APPLE_NVMMU_NUM_TCBS 0x28100 #define APPLE_NVMMU_ASQ_TCB_BASE 0x28108 #define APPLE_NVMMU_IOSQ_TCB_BASE 0x28110 #define APPLE_NVMMU_TCB_INVAL 0x28118 #define APPLE_NVMMU_TCB_STAT 0x28120 /* * This controller is a bit weird in the way command tags works: Both the * admin and the IO queue share the same tag space. Additionally, tags * cannot be higher than 0x40 which effectively limits the combined * queue depth to 0x40. Instead of wasting half of that on the admin queue * which gets much less traffic we instead reduce its size here. * The controller also doesn't support async event such that no space must * be reserved for NVME_NR_AEN_COMMANDS. */ #define APPLE_NVME_AQ_DEPTH 2 #define APPLE_NVME_AQ_MQ_TAG_DEPTH (APPLE_NVME_AQ_DEPTH - 1) /* * These can be higher, but we need to ensure that any command doesn't * require an sg allocation that needs more than a page of data. */ #define NVME_MAX_KB_SZ 4096 #define NVME_MAX_SEGS 127 /* * This controller comes with an embedded IOMMU known as NVMMU. * The NVMMU is pointed to an array of TCBs indexed by the command tag. * Each command must be configured inside this structure before it's allowed * to execute, including commands that don't require DMA transfers. * * An exception to this are Apple's vendor-specific commands (opcode 0xD8 on the * admin queue): Those commands must still be added to the NVMMU but the DMA * buffers cannot be represented as PRPs and must instead be allowed using SART. * * Programming the PRPs to the same values as those in the submission queue * looks rather silly at first. This hardware is however designed for a kernel * that runs the NVMMU code in a higher exception level than the NVMe driver. * In that setting the NVMe driver first programs the submission queue entry * and then executes a hypercall to the code that is allowed to program the * NVMMU. 
The NVMMU driver then creates a shadow copy of the PRPs while * verifying that they don't point to kernel text, data, pagetables, or similar * protected areas before programming the TCB to point to this shadow copy. * Since Linux doesn't do any of that we may as well just point both the queue * and the TCB PRP pointer to the same memory. */ struct apple_nvmmu_tcb { u8 opcode; #define APPLE_ANS_TCB_DMA_FROM_DEVICE BIT(0) #define APPLE_ANS_TCB_DMA_TO_DEVICE BIT(1) u8 dma_flags; u8 command_id; u8 _unk0; __le16 length; u8 _unk1[18]; __le64 prp1; __le64 prp2; u8 _unk2[16]; u8 aes_iv[8]; u8 _aes_unk[64]; }; /* * The Apple NVMe controller only supports a single admin and a single IO queue * which are both limited to 64 entries and share a single interrupt. * * The completion queue works as usual. The submission "queue" instead is * an array indexed by the command tag on this hardware. Commands must also be * present in the NVMMU's tcb array. They are triggered by writing their tag to * a MMIO register. */ struct apple_nvme_queue { struct nvme_command *sqes; struct nvme_completion *cqes; struct apple_nvmmu_tcb *tcbs; dma_addr_t sq_dma_addr; dma_addr_t cq_dma_addr; dma_addr_t tcb_dma_addr; u32 __iomem *sq_db; u32 __iomem *cq_db; u16 cq_head; u8 cq_phase; bool is_adminq; bool enabled; }; /* * The apple_nvme_iod describes the data in an I/O. * * The sg pointer contains the list of PRP chunk allocations in addition * to the actual struct scatterlist. */ struct apple_nvme_iod { struct nvme_request req; struct nvme_command cmd; struct apple_nvme_queue *q; int npages; /* In the PRP list. 0 means small pool in use */ int nents; /* Used in scatterlist */ dma_addr_t first_dma; unsigned int dma_len; /* length of single DMA segment mapping */ struct scatterlist *sg; }; struct apple_nvme { struct device *dev; void __iomem *mmio_coproc; void __iomem *mmio_nvme; struct device **pd_dev; struct device_link **pd_link; int pd_count; struct apple_sart *sart; struct apple_rtkit *rtk; struct reset_control *reset; struct dma_pool *prp_page_pool; struct dma_pool *prp_small_pool; mempool_t *iod_mempool; struct nvme_ctrl ctrl; struct work_struct remove_work; struct apple_nvme_queue adminq; struct apple_nvme_queue ioq; struct blk_mq_tag_set admin_tagset; struct blk_mq_tag_set tagset; int irq; spinlock_t lock; }; static_assert(sizeof(struct nvme_command) == 64); static_assert(sizeof(struct apple_nvmmu_tcb) == 128); static inline struct apple_nvme *ctrl_to_apple_nvme(struct nvme_ctrl *ctrl) { return container_of(ctrl, struct apple_nvme, ctrl); } static inline struct apple_nvme *queue_to_apple_nvme(struct apple_nvme_queue *q) { if (q->is_adminq) return container_of(q, struct apple_nvme, adminq); return container_of(q, struct apple_nvme, ioq); } static unsigned int apple_nvme_queue_depth(struct apple_nvme_queue *q) { if (q->is_adminq) return APPLE_NVME_AQ_DEPTH; return APPLE_ANS_MAX_QUEUE_DEPTH; } static void apple_nvme_rtkit_crashed(void *cookie) { struct apple_nvme *anv = cookie; dev_warn(anv->dev, "RTKit crashed; unable to recover without a reboot"); nvme_reset_ctrl(&anv->ctrl); } static int apple_nvme_sart_dma_setup(void *cookie, struct apple_rtkit_shmem *bfr) { struct apple_nvme *anv = cookie; int ret; if (bfr->iova) return -EINVAL; if (!bfr->size) return -EINVAL; bfr->buffer = dma_alloc_coherent(anv->dev, bfr->size, &bfr->iova, GFP_KERNEL); if (!bfr->buffer) return -ENOMEM; ret = apple_sart_add_allowed_region(anv->sart, bfr->iova, bfr->size); if (ret) { dma_free_coherent(anv->dev, bfr->size, bfr->buffer, bfr->iova); 
bfr->buffer = NULL; return -ENOMEM; } return 0; } static void apple_nvme_sart_dma_destroy(void *cookie, struct apple_rtkit_shmem *bfr) { struct apple_nvme *anv = cookie; apple_sart_remove_allowed_region(anv->sart, bfr->iova, bfr->size); dma_free_coherent(anv->dev, bfr->size, bfr->buffer, bfr->iova); } static const struct apple_rtkit_ops apple_nvme_rtkit_ops = { .crashed = apple_nvme_rtkit_crashed, .shmem_setup = apple_nvme_sart_dma_setup, .shmem_destroy = apple_nvme_sart_dma_destroy, }; static void apple_nvmmu_inval(struct apple_nvme_queue *q, unsigned int tag) { struct apple_nvme *anv = queue_to_apple_nvme(q); writel(tag, anv->mmio_nvme + APPLE_NVMMU_TCB_INVAL); if (readl(anv->mmio_nvme + APPLE_NVMMU_TCB_STAT)) dev_warn_ratelimited(anv->dev, "NVMMU TCB invalidation failed\n"); } static void apple_nvme_submit_cmd(struct apple_nvme_queue *q, struct nvme_command *cmd) { struct apple_nvme *anv = queue_to_apple_nvme(q); u32 tag = nvme_tag_from_cid(cmd->common.command_id); struct apple_nvmmu_tcb *tcb = &q->tcbs[tag]; tcb->opcode = cmd->common.opcode; tcb->prp1 = cmd->common.dptr.prp1; tcb->prp2 = cmd->common.dptr.prp2; tcb->length = cmd->rw.length; tcb->command_id = tag; if (nvme_is_write(cmd)) tcb->dma_flags = APPLE_ANS_TCB_DMA_TO_DEVICE; else tcb->dma_flags = APPLE_ANS_TCB_DMA_FROM_DEVICE; memcpy(&q->sqes[tag], cmd, sizeof(*cmd)); /* * This lock here doesn't make much sense at a first glace but * removing it will result in occasional missed completetion * interrupts even though the commands still appear on the CQ. * It's unclear why this happens but our best guess is that * there is a bug in the firmware triggered when a new command * is issued while we're inside the irq handler between the * NVMMU invalidation (and making the tag available again) * and the final CQ update. */ spin_lock_irq(&anv->lock); writel(tag, q->sq_db); spin_unlock_irq(&anv->lock); } /* * From pci.c: * Will slightly overestimate the number of pages needed. This is OK * as it only leads to a small amount of wasted memory for the lifetime of * the I/O. 
*/ static inline size_t apple_nvme_iod_alloc_size(void) { const unsigned int nprps = DIV_ROUND_UP( NVME_MAX_KB_SZ + NVME_CTRL_PAGE_SIZE, NVME_CTRL_PAGE_SIZE); const int npages = DIV_ROUND_UP(8 * nprps, PAGE_SIZE - 8); const size_t alloc_size = sizeof(__le64 *) * npages + sizeof(struct scatterlist) * NVME_MAX_SEGS; return alloc_size; } static void **apple_nvme_iod_list(struct request *req) { struct apple_nvme_iod *iod = blk_mq_rq_to_pdu(req); return (void **)(iod->sg + blk_rq_nr_phys_segments(req)); } static void apple_nvme_free_prps(struct apple_nvme *anv, struct request *req) { const int last_prp = NVME_CTRL_PAGE_SIZE / sizeof(__le64) - 1; struct apple_nvme_iod *iod = blk_mq_rq_to_pdu(req); dma_addr_t dma_addr = iod->first_dma; int i; for (i = 0; i < iod->npages; i++) { __le64 *prp_list = apple_nvme_iod_list(req)[i]; dma_addr_t next_dma_addr = le64_to_cpu(prp_list[last_prp]); dma_pool_free(anv->prp_page_pool, prp_list, dma_addr); dma_addr = next_dma_addr; } } static void apple_nvme_unmap_data(struct apple_nvme *anv, struct request *req) { struct apple_nvme_iod *iod = blk_mq_rq_to_pdu(req); if (iod->dma_len) { dma_unmap_page(anv->dev, iod->first_dma, iod->dma_len, rq_dma_dir(req)); return; } WARN_ON_ONCE(!iod->nents); dma_unmap_sg(anv->dev, iod->sg, iod->nents, rq_dma_dir(req)); if (iod->npages == 0) dma_pool_free(anv->prp_small_pool, apple_nvme_iod_list(req)[0], iod->first_dma); else apple_nvme_free_prps(anv, req); mempool_free(iod->sg, anv->iod_mempool); } static void apple_nvme_print_sgl(struct scatterlist *sgl, int nents) { int i; struct scatterlist *sg; for_each_sg(sgl, sg, nents, i) { dma_addr_t phys = sg_phys(sg); pr_warn("sg[%d] phys_addr:%pad offset:%d length:%d dma_address:%pad dma_length:%d\n", i, &phys, sg->offset, sg->length, &sg_dma_address(sg), sg_dma_len(sg)); } } static blk_status_t apple_nvme_setup_prps(struct apple_nvme *anv, struct request *req, struct nvme_rw_command *cmnd) { struct apple_nvme_iod *iod = blk_mq_rq_to_pdu(req); struct dma_pool *pool; int length = blk_rq_payload_bytes(req); struct scatterlist *sg = iod->sg; int dma_len = sg_dma_len(sg); u64 dma_addr = sg_dma_address(sg); int offset = dma_addr & (NVME_CTRL_PAGE_SIZE - 1); __le64 *prp_list; void **list = apple_nvme_iod_list(req); dma_addr_t prp_dma; int nprps, i; length -= (NVME_CTRL_PAGE_SIZE - offset); if (length <= 0) { iod->first_dma = 0; goto done; } dma_len -= (NVME_CTRL_PAGE_SIZE - offset); if (dma_len) { dma_addr += (NVME_CTRL_PAGE_SIZE - offset); } else { sg = sg_next(sg); dma_addr = sg_dma_address(sg); dma_len = sg_dma_len(sg); } if (length <= NVME_CTRL_PAGE_SIZE) { iod->first_dma = dma_addr; goto done; } nprps = DIV_ROUND_UP(length, NVME_CTRL_PAGE_SIZE); if (nprps <= (256 / 8)) { pool = anv->prp_small_pool; iod->npages = 0; } else { pool = anv->prp_page_pool; iod->npages = 1; } prp_list = dma_pool_alloc(pool, GFP_ATOMIC, &prp_dma); if (!prp_list) { iod->first_dma = dma_addr; iod->npages = -1; return BLK_STS_RESOURCE; } list[0] = prp_list; iod->first_dma = prp_dma; i = 0; for (;;) { if (i == NVME_CTRL_PAGE_SIZE >> 3) { __le64 *old_prp_list = prp_list; prp_list = dma_pool_alloc(pool, GFP_ATOMIC, &prp_dma); if (!prp_list) goto free_prps; list[iod->npages++] = prp_list; prp_list[0] = old_prp_list[i - 1]; old_prp_list[i - 1] = cpu_to_le64(prp_dma); i = 1; } prp_list[i++] = cpu_to_le64(dma_addr); dma_len -= NVME_CTRL_PAGE_SIZE; dma_addr += NVME_CTRL_PAGE_SIZE; length -= NVME_CTRL_PAGE_SIZE; if (length <= 0) break; if (dma_len > 0) continue; if (unlikely(dma_len < 0)) goto bad_sgl; sg = sg_next(sg); 
dma_addr = sg_dma_address(sg); dma_len = sg_dma_len(sg); } done: cmnd->dptr.prp1 = cpu_to_le64(sg_dma_address(iod->sg)); cmnd->dptr.prp2 = cpu_to_le64(iod->first_dma); return BLK_STS_OK; free_prps: apple_nvme_free_prps(anv, req); return BLK_STS_RESOURCE; bad_sgl: WARN(DO_ONCE(apple_nvme_print_sgl, iod->sg, iod->nents), "Invalid SGL for payload:%d nents:%d\n", blk_rq_payload_bytes(req), iod->nents); return BLK_STS_IOERR; } static blk_status_t apple_nvme_setup_prp_simple(struct apple_nvme *anv, struct request *req, struct nvme_rw_command *cmnd, struct bio_vec *bv) { struct apple_nvme_iod *iod = blk_mq_rq_to_pdu(req); unsigned int offset = bv->bv_offset & (NVME_CTRL_PAGE_SIZE - 1); unsigned int first_prp_len = NVME_CTRL_PAGE_SIZE - offset; iod->first_dma = dma_map_bvec(anv->dev, bv, rq_dma_dir(req), 0); if (dma_mapping_error(anv->dev, iod->first_dma)) return BLK_STS_RESOURCE; iod->dma_len = bv->bv_len; cmnd->dptr.prp1 = cpu_to_le64(iod->first_dma); if (bv->bv_len > first_prp_len) cmnd->dptr.prp2 = cpu_to_le64(iod->first_dma + first_prp_len); return BLK_STS_OK; } static blk_status_t apple_nvme_map_data(struct apple_nvme *anv, struct request *req, struct nvme_command *cmnd) { struct apple_nvme_iod *iod = blk_mq_rq_to_pdu(req); blk_status_t ret = BLK_STS_RESOURCE; int nr_mapped; if (blk_rq_nr_phys_segments(req) == 1) { struct bio_vec bv = req_bvec(req); if (bv.bv_offset + bv.bv_len <= NVME_CTRL_PAGE_SIZE * 2) return apple_nvme_setup_prp_simple(anv, req, &cmnd->rw, &bv); } iod->dma_len = 0; iod->sg = mempool_alloc(anv->iod_mempool, GFP_ATOMIC); if (!iod->sg) return BLK_STS_RESOURCE; sg_init_table(iod->sg, blk_rq_nr_phys_segments(req)); iod->nents = blk_rq_map_sg(req->q, req, iod->sg); if (!iod->nents) goto out_free_sg; nr_mapped = dma_map_sg_attrs(anv->dev, iod->sg, iod->nents, rq_dma_dir(req), DMA_ATTR_NO_WARN); if (!nr_mapped) goto out_free_sg; ret = apple_nvme_setup_prps(anv, req, &cmnd->rw); if (ret != BLK_STS_OK) goto out_unmap_sg; return BLK_STS_OK; out_unmap_sg: dma_unmap_sg(anv->dev, iod->sg, iod->nents, rq_dma_dir(req)); out_free_sg: mempool_free(iod->sg, anv->iod_mempool); return ret; } static __always_inline void apple_nvme_unmap_rq(struct request *req) { struct apple_nvme_iod *iod = blk_mq_rq_to_pdu(req); struct apple_nvme *anv = queue_to_apple_nvme(iod->q); if (blk_rq_nr_phys_segments(req)) apple_nvme_unmap_data(anv, req); } static void apple_nvme_complete_rq(struct request *req) { apple_nvme_unmap_rq(req); nvme_complete_rq(req); } static void apple_nvme_complete_batch(struct io_comp_batch *iob) { nvme_complete_batch(iob, apple_nvme_unmap_rq); } static inline bool apple_nvme_cqe_pending(struct apple_nvme_queue *q) { struct nvme_completion *hcqe = &q->cqes[q->cq_head]; return (le16_to_cpu(READ_ONCE(hcqe->status)) & 1) == q->cq_phase; } static inline struct blk_mq_tags * apple_nvme_queue_tagset(struct apple_nvme *anv, struct apple_nvme_queue *q) { if (q->is_adminq) return anv->admin_tagset.tags[0]; else return anv->tagset.tags[0]; } static inline void apple_nvme_handle_cqe(struct apple_nvme_queue *q, struct io_comp_batch *iob, u16 idx) { struct apple_nvme *anv = queue_to_apple_nvme(q); struct nvme_completion *cqe = &q->cqes[idx]; __u16 command_id = READ_ONCE(cqe->command_id); struct request *req; apple_nvmmu_inval(q, command_id); req = nvme_find_rq(apple_nvme_queue_tagset(anv, q), command_id); if (unlikely(!req)) { dev_warn(anv->dev, "invalid id %d completed", command_id); return; } if (!nvme_try_complete_req(req, cqe->status, cqe->result) && !blk_mq_add_to_batch(req, iob, 
nvme_req(req)->status, apple_nvme_complete_batch)) apple_nvme_complete_rq(req); } static inline void apple_nvme_update_cq_head(struct apple_nvme_queue *q) { u32 tmp = q->cq_head + 1; if (tmp == apple_nvme_queue_depth(q)) { q->cq_head = 0; q->cq_phase ^= 1; } else { q->cq_head = tmp; } } static bool apple_nvme_poll_cq(struct apple_nvme_queue *q, struct io_comp_batch *iob) { bool found = false; while (apple_nvme_cqe_pending(q)) { found = true; /* * load-load control dependency between phase and the rest of * the cqe requires a full read memory barrier */ dma_rmb(); apple_nvme_handle_cqe(q, iob, q->cq_head); apple_nvme_update_cq_head(q); } if (found) writel(q->cq_head, q->cq_db); return found; } static bool apple_nvme_handle_cq(struct apple_nvme_queue *q, bool force) { bool found; DEFINE_IO_COMP_BATCH(iob); if (!READ_ONCE(q->enabled) && !force) return false; found = apple_nvme_poll_cq(q, &iob); if (!rq_list_empty(iob.req_list)) apple_nvme_complete_batch(&iob); return found; } static irqreturn_t apple_nvme_irq(int irq, void *data) { struct apple_nvme *anv = data; bool handled = false; unsigned long flags; spin_lock_irqsave(&anv->lock, flags); if (apple_nvme_handle_cq(&anv->ioq, false)) handled = true; if (apple_nvme_handle_cq(&anv->adminq, false)) handled = true; spin_unlock_irqrestore(&anv->lock, flags); if (handled) return IRQ_HANDLED; return IRQ_NONE; } static int apple_nvme_create_cq(struct apple_nvme *anv) { struct nvme_command c = {}; /* * Note: we (ab)use the fact that the prp fields survive if no data * is attached to the request. */ c.create_cq.opcode = nvme_admin_create_cq; c.create_cq.prp1 = cpu_to_le64(anv->ioq.cq_dma_addr); c.create_cq.cqid = cpu_to_le16(1); c.create_cq.qsize = cpu_to_le16(APPLE_ANS_MAX_QUEUE_DEPTH - 1); c.create_cq.cq_flags = cpu_to_le16(NVME_QUEUE_PHYS_CONTIG | NVME_CQ_IRQ_ENABLED); c.create_cq.irq_vector = cpu_to_le16(0); return nvme_submit_sync_cmd(anv->ctrl.admin_q, &c, NULL, 0); } static int apple_nvme_remove_cq(struct apple_nvme *anv) { struct nvme_command c = {}; c.delete_queue.opcode = nvme_admin_delete_cq; c.delete_queue.qid = cpu_to_le16(1); return nvme_submit_sync_cmd(anv->ctrl.admin_q, &c, NULL, 0); } static int apple_nvme_create_sq(struct apple_nvme *anv) { struct nvme_command c = {}; /* * Note: we (ab)use the fact that the prp fields survive if no data * is attached to the request. */ c.create_sq.opcode = nvme_admin_create_sq; c.create_sq.prp1 = cpu_to_le64(anv->ioq.sq_dma_addr); c.create_sq.sqid = cpu_to_le16(1); c.create_sq.qsize = cpu_to_le16(APPLE_ANS_MAX_QUEUE_DEPTH - 1); c.create_sq.sq_flags = cpu_to_le16(NVME_QUEUE_PHYS_CONTIG); c.create_sq.cqid = cpu_to_le16(1); return nvme_submit_sync_cmd(anv->ctrl.admin_q, &c, NULL, 0); } static int apple_nvme_remove_sq(struct apple_nvme *anv) { struct nvme_command c = {}; c.delete_queue.opcode = nvme_admin_delete_sq; c.delete_queue.qid = cpu_to_le16(1); return nvme_submit_sync_cmd(anv->ctrl.admin_q, &c, NULL, 0); } static blk_status_t apple_nvme_queue_rq(struct blk_mq_hw_ctx *hctx, const struct blk_mq_queue_data *bd) { struct nvme_ns *ns = hctx->queue->queuedata; struct apple_nvme_queue *q = hctx->driver_data; struct apple_nvme *anv = queue_to_apple_nvme(q); struct request *req = bd->rq; struct apple_nvme_iod *iod = blk_mq_rq_to_pdu(req); struct nvme_command *cmnd = &iod->cmd; blk_status_t ret; iod->npages = -1; iod->nents = 0; /* * We should not need to do this, but we're still using this to * ensure we can drain requests on a dying queue. 
*/ if (unlikely(!READ_ONCE(q->enabled))) return BLK_STS_IOERR; if (!nvme_check_ready(&anv->ctrl, req, true)) return nvme_fail_nonready_command(&anv->ctrl, req); ret = nvme_setup_cmd(ns, req); if (ret) return ret; if (blk_rq_nr_phys_segments(req)) { ret = apple_nvme_map_data(anv, req, cmnd); if (ret) goto out_free_cmd; } nvme_start_request(req); apple_nvme_submit_cmd(q, cmnd); return BLK_STS_OK; out_free_cmd: nvme_cleanup_cmd(req); return ret; } static int apple_nvme_init_hctx(struct blk_mq_hw_ctx *hctx, void *data, unsigned int hctx_idx) { hctx->driver_data = data; return 0; } static int apple_nvme_init_request(struct blk_mq_tag_set *set, struct request *req, unsigned int hctx_idx, unsigned int numa_node) { struct apple_nvme_queue *q = set->driver_data; struct apple_nvme *anv = queue_to_apple_nvme(q); struct apple_nvme_iod *iod = blk_mq_rq_to_pdu(req); struct nvme_request *nreq = nvme_req(req); iod->q = q; nreq->ctrl = &anv->ctrl; nreq->cmd = &iod->cmd; return 0; } static void apple_nvme_disable(struct apple_nvme *anv, bool shutdown) { u32 csts = readl(anv->mmio_nvme + NVME_REG_CSTS); bool dead = false, freeze = false; unsigned long flags; if (apple_rtkit_is_crashed(anv->rtk)) dead = true; if (!(csts & NVME_CSTS_RDY)) dead = true; if (csts & NVME_CSTS_CFS) dead = true; if (anv->ctrl.state == NVME_CTRL_LIVE || anv->ctrl.state == NVME_CTRL_RESETTING) { freeze = true; nvme_start_freeze(&anv->ctrl); } /* * Give the controller a chance to complete all entered requests if * doing a safe shutdown. */ if (!dead && shutdown && freeze) nvme_wait_freeze_timeout(&anv->ctrl, NVME_IO_TIMEOUT); nvme_quiesce_io_queues(&anv->ctrl); if (!dead) { if (READ_ONCE(anv->ioq.enabled)) { apple_nvme_remove_sq(anv); apple_nvme_remove_cq(anv); } /* * Always disable the NVMe controller after shutdown. * We need to do this to bring it back up later anyway, and we * can't do it while the firmware is not running (e.g. in the * resume reset path before RTKit is initialized), so for Apple * controllers it makes sense to unconditionally do it here. * Additionally, this sequence of events is reliable, while * others (like disabling after bringing back the firmware on * resume) seem to run into trouble under some circumstances. * * Both U-Boot and m1n1 also use this convention (i.e. an ANS * NVMe controller is handed off with firmware shut down, in an * NVMe disabled state, after a clean shutdown). */ if (shutdown) nvme_disable_ctrl(&anv->ctrl, shutdown); nvme_disable_ctrl(&anv->ctrl, false); } WRITE_ONCE(anv->ioq.enabled, false); WRITE_ONCE(anv->adminq.enabled, false); mb(); /* ensure that nvme_queue_rq() sees that enabled is cleared */ nvme_quiesce_admin_queue(&anv->ctrl); /* last chance to complete any requests before nvme_cancel_request */ spin_lock_irqsave(&anv->lock, flags); apple_nvme_handle_cq(&anv->ioq, true); apple_nvme_handle_cq(&anv->adminq, true); spin_unlock_irqrestore(&anv->lock, flags); nvme_cancel_tagset(&anv->ctrl); nvme_cancel_admin_tagset(&anv->ctrl); /* * The driver will not be starting up queues again if shutting down so * must flush all entered requests to their failed completion to avoid * deadlocking blk-mq hot-cpu notifier. 
*/ if (shutdown) { nvme_unquiesce_io_queues(&anv->ctrl); nvme_unquiesce_admin_queue(&anv->ctrl); } } static enum blk_eh_timer_return apple_nvme_timeout(struct request *req) { struct apple_nvme_iod *iod = blk_mq_rq_to_pdu(req); struct apple_nvme_queue *q = iod->q; struct apple_nvme *anv = queue_to_apple_nvme(q); unsigned long flags; u32 csts = readl(anv->mmio_nvme + NVME_REG_CSTS); if (anv->ctrl.state != NVME_CTRL_LIVE) { /* * From rdma.c: * If we are resetting, connecting or deleting we should * complete immediately because we may block controller * teardown or setup sequence * - ctrl disable/shutdown fabrics requests * - connect requests * - initialization admin requests * - I/O requests that entered after unquiescing and * the controller stopped responding * * All other requests should be cancelled by the error * recovery work, so it's fine that we fail it here. */ dev_warn(anv->dev, "I/O %d(aq:%d) timeout while not in live state\n", req->tag, q->is_adminq); if (blk_mq_request_started(req) && !blk_mq_request_completed(req)) { nvme_req(req)->status = NVME_SC_HOST_ABORTED_CMD; nvme_req(req)->flags |= NVME_REQ_CANCELLED; blk_mq_complete_request(req); } return BLK_EH_DONE; } /* check if we just missed an interrupt if we're still alive */ if (!apple_rtkit_is_crashed(anv->rtk) && !(csts & NVME_CSTS_CFS)) { spin_lock_irqsave(&anv->lock, flags); apple_nvme_handle_cq(q, false); spin_unlock_irqrestore(&anv->lock, flags); if (blk_mq_request_completed(req)) { dev_warn(anv->dev, "I/O %d(aq:%d) timeout: completion polled\n", req->tag, q->is_adminq); return BLK_EH_DONE; } } /* * aborting commands isn't supported which leaves a full reset as our * only option here */ dev_warn(anv->dev, "I/O %d(aq:%d) timeout: resetting controller\n", req->tag, q->is_adminq); nvme_req(req)->flags |= NVME_REQ_CANCELLED; apple_nvme_disable(anv, false); nvme_reset_ctrl(&anv->ctrl); return BLK_EH_DONE; } static int apple_nvme_poll(struct blk_mq_hw_ctx *hctx, struct io_comp_batch *iob) { struct apple_nvme_queue *q = hctx->driver_data; struct apple_nvme *anv = queue_to_apple_nvme(q); bool found; unsigned long flags; spin_lock_irqsave(&anv->lock, flags); found = apple_nvme_poll_cq(q, iob); spin_unlock_irqrestore(&anv->lock, flags); return found; } static const struct blk_mq_ops apple_nvme_mq_admin_ops = { .queue_rq = apple_nvme_queue_rq, .complete = apple_nvme_complete_rq, .init_hctx = apple_nvme_init_hctx, .init_request = apple_nvme_init_request, .timeout = apple_nvme_timeout, }; static const struct blk_mq_ops apple_nvme_mq_ops = { .queue_rq = apple_nvme_queue_rq, .complete = apple_nvme_complete_rq, .init_hctx = apple_nvme_init_hctx, .init_request = apple_nvme_init_request, .timeout = apple_nvme_timeout, .poll = apple_nvme_poll, }; static void apple_nvme_init_queue(struct apple_nvme_queue *q) { unsigned int depth = apple_nvme_queue_depth(q); q->cq_head = 0; q->cq_phase = 1; memset(q->tcbs, 0, APPLE_ANS_MAX_QUEUE_DEPTH * sizeof(struct apple_nvmmu_tcb)); memset(q->cqes, 0, depth * sizeof(struct nvme_completion)); WRITE_ONCE(q->enabled, true); wmb(); /* ensure the first interrupt sees the initialization */ } static void apple_nvme_reset_work(struct work_struct *work) { unsigned int nr_io_queues = 1; int ret; u32 boot_status, aqa; struct apple_nvme *anv = container_of(work, struct apple_nvme, ctrl.reset_work); if (anv->ctrl.state != NVME_CTRL_RESETTING) { dev_warn(anv->dev, "ctrl state %d is not RESETTING\n", anv->ctrl.state); ret = -ENODEV; goto out; } /* there's unfortunately no known way to recover if RTKit crashed :( */ if 
(apple_rtkit_is_crashed(anv->rtk)) { dev_err(anv->dev, "RTKit has crashed without any way to recover."); ret = -EIO; goto out; } /* RTKit must be shut down cleanly for the (soft)-reset to work */ if (apple_rtkit_is_running(anv->rtk)) { /* reset the controller if it is enabled */ if (anv->ctrl.ctrl_config & NVME_CC_ENABLE) apple_nvme_disable(anv, false); dev_dbg(anv->dev, "Trying to shut down RTKit before reset."); ret = apple_rtkit_shutdown(anv->rtk); if (ret) goto out; } writel(0, anv->mmio_coproc + APPLE_ANS_COPROC_CPU_CONTROL); ret = reset_control_assert(anv->reset); if (ret) goto out; ret = apple_rtkit_reinit(anv->rtk); if (ret) goto out; ret = reset_control_deassert(anv->reset); if (ret) goto out; writel(APPLE_ANS_COPROC_CPU_CONTROL_RUN, anv->mmio_coproc + APPLE_ANS_COPROC_CPU_CONTROL); ret = apple_rtkit_boot(anv->rtk); if (ret) { dev_err(anv->dev, "ANS did not boot"); goto out; } ret = readl_poll_timeout(anv->mmio_nvme + APPLE_ANS_BOOT_STATUS, boot_status, boot_status == APPLE_ANS_BOOT_STATUS_OK, USEC_PER_MSEC, APPLE_ANS_BOOT_TIMEOUT); if (ret) { dev_err(anv->dev, "ANS did not initialize"); goto out; } dev_dbg(anv->dev, "ANS booted successfully."); /* * Limit the max command size to prevent iod->sg allocations going * over a single page. */ anv->ctrl.max_hw_sectors = min_t(u32, NVME_MAX_KB_SZ << 1, dma_max_mapping_size(anv->dev) >> 9); anv->ctrl.max_segments = NVME_MAX_SEGS; dma_set_max_seg_size(anv->dev, 0xffffffff); /* * Enable NVMMU and linear submission queues. * While we could keep those disabled and pretend this is slightly * more common NVMe controller we'd still need some quirks (e.g. * sq entries will be 128 bytes) and Apple might drop support for * that mode in the future. */ writel(APPLE_ANS_LINEAR_SQ_EN, anv->mmio_nvme + APPLE_ANS_LINEAR_SQ_CTRL); /* Allow as many pending command as possible for both queues */ writel(APPLE_ANS_MAX_QUEUE_DEPTH | (APPLE_ANS_MAX_QUEUE_DEPTH << 16), anv->mmio_nvme + APPLE_ANS_MAX_PEND_CMDS_CTRL); /* Setup the NVMMU for the maximum admin and IO queue depth */ writel(APPLE_ANS_MAX_QUEUE_DEPTH - 1, anv->mmio_nvme + APPLE_NVMMU_NUM_TCBS); /* * This is probably a chicken bit: without it all commands where any PRP * is set to zero (including those that don't use that field) fail and * the co-processor complains about "completed with err BAD_CMD-" or * a "NULL_PRP_PTR_ERR" in the syslog */ writel(readl(anv->mmio_nvme + APPLE_ANS_UNKNOWN_CTRL) & ~APPLE_ANS_PRP_NULL_CHECK, anv->mmio_nvme + APPLE_ANS_UNKNOWN_CTRL); /* Setup the admin queue */ aqa = APPLE_NVME_AQ_DEPTH - 1; aqa |= aqa << 16; writel(aqa, anv->mmio_nvme + NVME_REG_AQA); writeq(anv->adminq.sq_dma_addr, anv->mmio_nvme + NVME_REG_ASQ); writeq(anv->adminq.cq_dma_addr, anv->mmio_nvme + NVME_REG_ACQ); /* Setup NVMMU for both queues */ writeq(anv->adminq.tcb_dma_addr, anv->mmio_nvme + APPLE_NVMMU_ASQ_TCB_BASE); writeq(anv->ioq.tcb_dma_addr, anv->mmio_nvme + APPLE_NVMMU_IOSQ_TCB_BASE); anv->ctrl.sqsize = APPLE_ANS_MAX_QUEUE_DEPTH - 1; /* 0's based queue depth */ anv->ctrl.cap = readq(anv->mmio_nvme + NVME_REG_CAP); dev_dbg(anv->dev, "Enabling controller now"); ret = nvme_enable_ctrl(&anv->ctrl); if (ret) goto out; dev_dbg(anv->dev, "Starting admin queue"); apple_nvme_init_queue(&anv->adminq); nvme_unquiesce_admin_queue(&anv->ctrl); if (!nvme_change_ctrl_state(&anv->ctrl, NVME_CTRL_CONNECTING)) { dev_warn(anv->ctrl.device, "failed to mark controller CONNECTING\n"); ret = -ENODEV; goto out; } ret = nvme_init_ctrl_finish(&anv->ctrl, false); if (ret) goto out; dev_dbg(anv->dev, "Creating IOCQ"); ret 
= apple_nvme_create_cq(anv); if (ret) goto out; dev_dbg(anv->dev, "Creating IOSQ"); ret = apple_nvme_create_sq(anv); if (ret) goto out_remove_cq; apple_nvme_init_queue(&anv->ioq); nr_io_queues = 1; ret = nvme_set_queue_count(&anv->ctrl, &nr_io_queues); if (ret) goto out_remove_sq; if (nr_io_queues != 1) { ret = -ENXIO; goto out_remove_sq; } anv->ctrl.queue_count = nr_io_queues + 1; nvme_unquiesce_io_queues(&anv->ctrl); nvme_wait_freeze(&anv->ctrl); blk_mq_update_nr_hw_queues(&anv->tagset, 1); nvme_unfreeze(&anv->ctrl); if (!nvme_change_ctrl_state(&anv->ctrl, NVME_CTRL_LIVE)) { dev_warn(anv->ctrl.device, "failed to mark controller live state\n"); ret = -ENODEV; goto out_remove_sq; } nvme_start_ctrl(&anv->ctrl); dev_dbg(anv->dev, "ANS boot and NVMe init completed."); return; out_remove_sq: apple_nvme_remove_sq(anv); out_remove_cq: apple_nvme_remove_cq(anv); out: dev_warn(anv->ctrl.device, "Reset failure status: %d\n", ret); nvme_change_ctrl_state(&anv->ctrl, NVME_CTRL_DELETING); nvme_get_ctrl(&anv->ctrl); apple_nvme_disable(anv, false); nvme_mark_namespaces_dead(&anv->ctrl); if (!queue_work(nvme_wq, &anv->remove_work)) nvme_put_ctrl(&anv->ctrl); } static void apple_nvme_remove_dead_ctrl_work(struct work_struct *work) { struct apple_nvme *anv = container_of(work, struct apple_nvme, remove_work); nvme_put_ctrl(&anv->ctrl); device_release_driver(anv->dev); } static int apple_nvme_reg_read32(struct nvme_ctrl *ctrl, u32 off, u32 *val) { *val = readl(ctrl_to_apple_nvme(ctrl)->mmio_nvme + off); return 0; } static int apple_nvme_reg_write32(struct nvme_ctrl *ctrl, u32 off, u32 val) { writel(val, ctrl_to_apple_nvme(ctrl)->mmio_nvme + off); return 0; } static int apple_nvme_reg_read64(struct nvme_ctrl *ctrl, u32 off, u64 *val) { *val = readq(ctrl_to_apple_nvme(ctrl)->mmio_nvme + off); return 0; } static int apple_nvme_get_address(struct nvme_ctrl *ctrl, char *buf, int size) { struct device *dev = ctrl_to_apple_nvme(ctrl)->dev; return snprintf(buf, size, "%s\n", dev_name(dev)); } static void apple_nvme_free_ctrl(struct nvme_ctrl *ctrl) { struct apple_nvme *anv = ctrl_to_apple_nvme(ctrl); if (anv->ctrl.admin_q) blk_put_queue(anv->ctrl.admin_q); put_device(anv->dev); } static const struct nvme_ctrl_ops nvme_ctrl_ops = { .name = "apple-nvme", .module = THIS_MODULE, .flags = 0, .reg_read32 = apple_nvme_reg_read32, .reg_write32 = apple_nvme_reg_write32, .reg_read64 = apple_nvme_reg_read64, .free_ctrl = apple_nvme_free_ctrl, .get_address = apple_nvme_get_address, }; static void apple_nvme_async_probe(void *data, async_cookie_t cookie) { struct apple_nvme *anv = data; flush_work(&anv->ctrl.reset_work); flush_work(&anv->ctrl.scan_work); nvme_put_ctrl(&anv->ctrl); } static void devm_apple_nvme_put_tag_set(void *data) { blk_mq_free_tag_set(data); } static int apple_nvme_alloc_tagsets(struct apple_nvme *anv) { int ret; anv->admin_tagset.ops = &apple_nvme_mq_admin_ops; anv->admin_tagset.nr_hw_queues = 1; anv->admin_tagset.queue_depth = APPLE_NVME_AQ_MQ_TAG_DEPTH; anv->admin_tagset.timeout = NVME_ADMIN_TIMEOUT; anv->admin_tagset.numa_node = NUMA_NO_NODE; anv->admin_tagset.cmd_size = sizeof(struct apple_nvme_iod); anv->admin_tagset.flags = BLK_MQ_F_NO_SCHED; anv->admin_tagset.driver_data = &anv->adminq; ret = blk_mq_alloc_tag_set(&anv->admin_tagset); if (ret) return ret; ret = devm_add_action_or_reset(anv->dev, devm_apple_nvme_put_tag_set, &anv->admin_tagset); if (ret) return ret; anv->tagset.ops = &apple_nvme_mq_ops; anv->tagset.nr_hw_queues = 1; anv->tagset.nr_maps = 1; /* * Tags are used as an index to the NVMMU 
and must be unique across * both queues. The admin queue gets the first APPLE_NVME_AQ_DEPTH which * must be marked as reserved in the IO queue. */ anv->tagset.reserved_tags = APPLE_NVME_AQ_DEPTH; anv->tagset.queue_depth = APPLE_ANS_MAX_QUEUE_DEPTH - 1; anv->tagset.timeout = NVME_IO_TIMEOUT; anv->tagset.numa_node = NUMA_NO_NODE; anv->tagset.cmd_size = sizeof(struct apple_nvme_iod); anv->tagset.flags = BLK_MQ_F_SHOULD_MERGE; anv->tagset.driver_data = &anv->ioq; ret = blk_mq_alloc_tag_set(&anv->tagset); if (ret) return ret; ret = devm_add_action_or_reset(anv->dev, devm_apple_nvme_put_tag_set, &anv->tagset); if (ret) return ret; anv->ctrl.admin_tagset = &anv->admin_tagset; anv->ctrl.tagset = &anv->tagset; return 0; } static int apple_nvme_queue_alloc(struct apple_nvme *anv, struct apple_nvme_queue *q) { unsigned int depth = apple_nvme_queue_depth(q); q->cqes = dmam_alloc_coherent(anv->dev, depth * sizeof(struct nvme_completion), &q->cq_dma_addr, GFP_KERNEL); if (!q->cqes) return -ENOMEM; q->sqes = dmam_alloc_coherent(anv->dev, depth * sizeof(struct nvme_command), &q->sq_dma_addr, GFP_KERNEL); if (!q->sqes) return -ENOMEM; /* * We need the maximum queue depth here because the NVMMU only has a * single depth configuration shared between both queues. */ q->tcbs = dmam_alloc_coherent(anv->dev, APPLE_ANS_MAX_QUEUE_DEPTH * sizeof(struct apple_nvmmu_tcb), &q->tcb_dma_addr, GFP_KERNEL); if (!q->tcbs) return -ENOMEM; /* * initialize phase to make sure the allocated and empty memory * doesn't look like a full cq already. */ q->cq_phase = 1; return 0; } static void apple_nvme_detach_genpd(struct apple_nvme *anv) { int i; if (anv->pd_count <= 1) return; for (i = anv->pd_count - 1; i >= 0; i--) { if (anv->pd_link[i]) device_link_del(anv->pd_link[i]); if (!IS_ERR_OR_NULL(anv->pd_dev[i])) dev_pm_domain_detach(anv->pd_dev[i], true); } } static int apple_nvme_attach_genpd(struct apple_nvme *anv) { struct device *dev = anv->dev; int i; anv->pd_count = of_count_phandle_with_args( dev->of_node, "power-domains", "#power-domain-cells"); if (anv->pd_count <= 1) return 0; anv->pd_dev = devm_kcalloc(dev, anv->pd_count, sizeof(*anv->pd_dev), GFP_KERNEL); if (!anv->pd_dev) return -ENOMEM; anv->pd_link = devm_kcalloc(dev, anv->pd_count, sizeof(*anv->pd_link), GFP_KERNEL); if (!anv->pd_link) return -ENOMEM; for (i = 0; i < anv->pd_count; i++) { anv->pd_dev[i] = dev_pm_domain_attach_by_id(dev, i); if (IS_ERR(anv->pd_dev[i])) { apple_nvme_detach_genpd(anv); return PTR_ERR(anv->pd_dev[i]); } anv->pd_link[i] = device_link_add(dev, anv->pd_dev[i], DL_FLAG_STATELESS | DL_FLAG_PM_RUNTIME | DL_FLAG_RPM_ACTIVE); if (!anv->pd_link[i]) { apple_nvme_detach_genpd(anv); return -EINVAL; } } return 0; } static void devm_apple_nvme_mempool_destroy(void *data) { mempool_destroy(data); } static int apple_nvme_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; struct apple_nvme *anv; int ret; anv = devm_kzalloc(dev, sizeof(*anv), GFP_KERNEL); if (!anv) return -ENOMEM; anv->dev = get_device(dev); anv->adminq.is_adminq = true; platform_set_drvdata(pdev, anv); ret = apple_nvme_attach_genpd(anv); if (ret < 0) { dev_err_probe(dev, ret, "Failed to attach power domains"); goto put_dev; } if (dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64))) { ret = -ENXIO; goto put_dev; } anv->irq = platform_get_irq(pdev, 0); if (anv->irq < 0) { ret = anv->irq; goto put_dev; } if (!anv->irq) { ret = -ENXIO; goto put_dev; } anv->mmio_coproc = devm_platform_ioremap_resource_byname(pdev, "ans"); if (IS_ERR(anv->mmio_coproc)) { ret = 
PTR_ERR(anv->mmio_coproc); goto put_dev; } anv->mmio_nvme = devm_platform_ioremap_resource_byname(pdev, "nvme"); if (IS_ERR(anv->mmio_nvme)) { ret = PTR_ERR(anv->mmio_nvme); goto put_dev; } anv->adminq.sq_db = anv->mmio_nvme + APPLE_ANS_LINEAR_ASQ_DB; anv->adminq.cq_db = anv->mmio_nvme + APPLE_ANS_ACQ_DB; anv->ioq.sq_db = anv->mmio_nvme + APPLE_ANS_LINEAR_IOSQ_DB; anv->ioq.cq_db = anv->mmio_nvme + APPLE_ANS_IOCQ_DB; anv->sart = devm_apple_sart_get(dev); if (IS_ERR(anv->sart)) { ret = dev_err_probe(dev, PTR_ERR(anv->sart), "Failed to initialize SART"); goto put_dev; } anv->reset = devm_reset_control_array_get_exclusive(anv->dev); if (IS_ERR(anv->reset)) { ret = dev_err_probe(dev, PTR_ERR(anv->reset), "Failed to get reset control"); goto put_dev; } INIT_WORK(&anv->ctrl.reset_work, apple_nvme_reset_work); INIT_WORK(&anv->remove_work, apple_nvme_remove_dead_ctrl_work); spin_lock_init(&anv->lock); ret = apple_nvme_queue_alloc(anv, &anv->adminq); if (ret) goto put_dev; ret = apple_nvme_queue_alloc(anv, &anv->ioq); if (ret) goto put_dev; anv->prp_page_pool = dmam_pool_create("prp list page", anv->dev, NVME_CTRL_PAGE_SIZE, NVME_CTRL_PAGE_SIZE, 0); if (!anv->prp_page_pool) { ret = -ENOMEM; goto put_dev; } anv->prp_small_pool = dmam_pool_create("prp list 256", anv->dev, 256, 256, 0); if (!anv->prp_small_pool) { ret = -ENOMEM; goto put_dev; } WARN_ON_ONCE(apple_nvme_iod_alloc_size() > PAGE_SIZE); anv->iod_mempool = mempool_create_kmalloc_pool(1, apple_nvme_iod_alloc_size()); if (!anv->iod_mempool) { ret = -ENOMEM; goto put_dev; } ret = devm_add_action_or_reset(anv->dev, devm_apple_nvme_mempool_destroy, anv->iod_mempool); if (ret) goto put_dev; ret = apple_nvme_alloc_tagsets(anv); if (ret) goto put_dev; ret = devm_request_irq(anv->dev, anv->irq, apple_nvme_irq, 0, "nvme-apple", anv); if (ret) { dev_err_probe(dev, ret, "Failed to request IRQ"); goto put_dev; } anv->rtk = devm_apple_rtkit_init(dev, anv, NULL, 0, &apple_nvme_rtkit_ops); if (IS_ERR(anv->rtk)) { ret = dev_err_probe(dev, PTR_ERR(anv->rtk), "Failed to initialize RTKit"); goto put_dev; } ret = nvme_init_ctrl(&anv->ctrl, anv->dev, &nvme_ctrl_ops, NVME_QUIRK_SKIP_CID_GEN | NVME_QUIRK_IDENTIFY_CNS); if (ret) { dev_err_probe(dev, ret, "Failed to initialize nvme_ctrl"); goto put_dev; } anv->ctrl.admin_q = blk_mq_init_queue(&anv->admin_tagset); if (IS_ERR(anv->ctrl.admin_q)) { ret = -ENOMEM; goto put_dev; } nvme_reset_ctrl(&anv->ctrl); async_schedule(apple_nvme_async_probe, anv); return 0; put_dev: put_device(anv->dev); return ret; } static int apple_nvme_remove(struct platform_device *pdev) { struct apple_nvme *anv = platform_get_drvdata(pdev); nvme_change_ctrl_state(&anv->ctrl, NVME_CTRL_DELETING); flush_work(&anv->ctrl.reset_work); nvme_stop_ctrl(&anv->ctrl); nvme_remove_namespaces(&anv->ctrl); apple_nvme_disable(anv, true); nvme_uninit_ctrl(&anv->ctrl); if (apple_rtkit_is_running(anv->rtk)) apple_rtkit_shutdown(anv->rtk); apple_nvme_detach_genpd(anv); return 0; } static void apple_nvme_shutdown(struct platform_device *pdev) { struct apple_nvme *anv = platform_get_drvdata(pdev); apple_nvme_disable(anv, true); if (apple_rtkit_is_running(anv->rtk)) apple_rtkit_shutdown(anv->rtk); } static int apple_nvme_resume(struct device *dev) { struct apple_nvme *anv = dev_get_drvdata(dev); return nvme_reset_ctrl(&anv->ctrl); } static int apple_nvme_suspend(struct device *dev) { struct apple_nvme *anv = dev_get_drvdata(dev); int ret = 0; apple_nvme_disable(anv, true); if (apple_rtkit_is_running(anv->rtk)) ret = apple_rtkit_shutdown(anv->rtk); writel(0, 
       anv->mmio_coproc + APPLE_ANS_COPROC_CPU_CONTROL);
	return ret;
}

static DEFINE_SIMPLE_DEV_PM_OPS(apple_nvme_pm_ops, apple_nvme_suspend,
				apple_nvme_resume);

static const struct of_device_id apple_nvme_of_match[] = {
	{ .compatible = "apple,nvme-ans2" },
	{},
};
MODULE_DEVICE_TABLE(of, apple_nvme_of_match);

static struct platform_driver apple_nvme_driver = {
	.driver = {
		.name = "nvme-apple",
		.of_match_table = apple_nvme_of_match,
		.pm = pm_sleep_ptr(&apple_nvme_pm_ops),
	},
	.probe = apple_nvme_probe,
	.remove = apple_nvme_remove,
	.shutdown = apple_nvme_shutdown,
};
module_platform_driver(apple_nvme_driver);

MODULE_AUTHOR("Sven Peter <[email protected]>");
MODULE_LICENSE("GPL");
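/*
 * Editor's sketch (not driver code): a minimal userspace model of the
 * phase-bit convention used by apple_nvme_cqe_pending() and
 * apple_nvme_update_cq_head() above.  The consumer starts with an expected
 * phase of 1 so that freshly zeroed CQ memory never looks like a valid
 * completion; every time the head wraps, the expected phase flips.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define DEMO_CQ_DEPTH 4

struct demo_cqe {
	uint16_t status;	/* bit 0 carries the phase written by the "device" */
};

struct demo_cq {
	struct demo_cqe entries[DEMO_CQ_DEPTH];
	unsigned int head;	/* consumer index */
	unsigned int tail;	/* producer index */
	uint16_t cons_phase;	/* phase the consumer expects next */
	uint16_t prod_phase;	/* phase the producer writes */
};

static void demo_cq_init(struct demo_cq *cq)
{
	memset(cq, 0, sizeof(*cq));
	cq->cons_phase = 1;
	cq->prod_phase = 1;
}

static void demo_cq_produce(struct demo_cq *cq)
{
	cq->entries[cq->tail].status = cq->prod_phase;
	if (++cq->tail == DEMO_CQ_DEPTH) {
		cq->tail = 0;
		cq->prod_phase ^= 1;
	}
}

static bool demo_cq_consume(struct demo_cq *cq)
{
	if ((cq->entries[cq->head].status & 1) != cq->cons_phase)
		return false;	/* nothing new: stale or zeroed entry */
	if (++cq->head == DEMO_CQ_DEPTH) {
		cq->head = 0;
		cq->cons_phase ^= 1;
	}
	return true;
}

int main(void)
{
	struct demo_cq cq;
	int i, consumed = 0;

	demo_cq_init(&cq);
	printf("empty ring pending? %d\n", demo_cq_consume(&cq));	/* 0 */

	for (i = 0; i < 3; i++)
		demo_cq_produce(&cq);
	while (demo_cq_consume(&cq))
		consumed++;

	for (i = 0; i < 3; i++)		/* this batch crosses the wrap point */
		demo_cq_produce(&cq);
	while (demo_cq_consume(&cq))
		consumed++;

	printf("consumed %d completions across one wrap\n", consumed);	/* 6 */
	return 0;
}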
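/*
 * Editor's sketch (not driver code): the arithmetic behind
 * apple_nvme_setup_prps() above.  PRP1 covers the transfer up to the end of
 * the first controller page; if at most one more page remains, PRP2 points
 * at it directly, otherwise PRP2 points at a list of page pointers.  The
 * list comes from the 256-byte "small" pool when it fits (256 / 8 = 32
 * entries), from a full-page pool otherwise.  A controller page size of
 * 4096 bytes is assumed here.
 */
#include <stdio.h>

#define CTRL_PAGE_SIZE 4096u

static void demo_prp_plan(unsigned int len, unsigned int offset)
{
	unsigned int remaining, nprps;

	if (len <= CTRL_PAGE_SIZE - offset) {
		printf("%6u bytes @%4u: PRP1 only\n", len, offset);
		return;
	}
	remaining = len - (CTRL_PAGE_SIZE - offset);
	if (remaining <= CTRL_PAGE_SIZE) {
		printf("%6u bytes @%4u: PRP1 + direct PRP2\n", len, offset);
		return;
	}
	nprps = (remaining + CTRL_PAGE_SIZE - 1) / CTRL_PAGE_SIZE;
	printf("%6u bytes @%4u: PRP1 + %u-entry list (%s pool)\n",
	       len, offset, nprps, nprps <= 256 / 8 ? "small" : "page");
}

int main(void)
{
	demo_prp_plan(4096, 0);		/* fits entirely behind PRP1 */
	demo_prp_plan(8192, 0);		/* PRP1 + direct PRP2 */
	demo_prp_plan(32768, 512);	/* small-pool PRP list */
	demo_prp_plan(262144, 0);	/* needs the full-page PRP pool */
	return 0;
}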
linux-master
drivers/nvme/host/apple.c
// SPDX-License-Identifier: GPL-2.0 /* * NVM Express hardware monitoring support * Copyright (c) 2019, Guenter Roeck */ #include <linux/hwmon.h> #include <linux/units.h> #include <asm/unaligned.h> #include "nvme.h" struct nvme_hwmon_data { struct nvme_ctrl *ctrl; struct nvme_smart_log *log; struct mutex read_lock; }; static int nvme_get_temp_thresh(struct nvme_ctrl *ctrl, int sensor, bool under, long *temp) { unsigned int threshold = sensor << NVME_TEMP_THRESH_SELECT_SHIFT; u32 status; int ret; if (under) threshold |= NVME_TEMP_THRESH_TYPE_UNDER; ret = nvme_get_features(ctrl, NVME_FEAT_TEMP_THRESH, threshold, NULL, 0, &status); if (ret > 0) return -EIO; if (ret < 0) return ret; *temp = kelvin_to_millicelsius(status & NVME_TEMP_THRESH_MASK); return 0; } static int nvme_set_temp_thresh(struct nvme_ctrl *ctrl, int sensor, bool under, long temp) { unsigned int threshold = sensor << NVME_TEMP_THRESH_SELECT_SHIFT; int ret; temp = millicelsius_to_kelvin(temp); threshold |= clamp_val(temp, 0, NVME_TEMP_THRESH_MASK); if (under) threshold |= NVME_TEMP_THRESH_TYPE_UNDER; ret = nvme_set_features(ctrl, NVME_FEAT_TEMP_THRESH, threshold, NULL, 0, NULL); if (ret > 0) return -EIO; return ret; } static int nvme_hwmon_get_smart_log(struct nvme_hwmon_data *data) { return nvme_get_log(data->ctrl, NVME_NSID_ALL, NVME_LOG_SMART, 0, NVME_CSI_NVM, data->log, sizeof(*data->log), 0); } static int nvme_hwmon_read(struct device *dev, enum hwmon_sensor_types type, u32 attr, int channel, long *val) { struct nvme_hwmon_data *data = dev_get_drvdata(dev); struct nvme_smart_log *log = data->log; int temp; int err; /* * First handle attributes which don't require us to read * the smart log. */ switch (attr) { case hwmon_temp_max: return nvme_get_temp_thresh(data->ctrl, channel, false, val); case hwmon_temp_min: return nvme_get_temp_thresh(data->ctrl, channel, true, val); case hwmon_temp_crit: *val = kelvin_to_millicelsius(data->ctrl->cctemp); return 0; default: break; } mutex_lock(&data->read_lock); err = nvme_hwmon_get_smart_log(data); if (err) goto unlock; switch (attr) { case hwmon_temp_input: if (!channel) temp = get_unaligned_le16(log->temperature); else temp = le16_to_cpu(log->temp_sensor[channel - 1]); *val = kelvin_to_millicelsius(temp); break; case hwmon_temp_alarm: *val = !!(log->critical_warning & NVME_SMART_CRIT_TEMPERATURE); break; default: err = -EOPNOTSUPP; break; } unlock: mutex_unlock(&data->read_lock); return err; } static int nvme_hwmon_write(struct device *dev, enum hwmon_sensor_types type, u32 attr, int channel, long val) { struct nvme_hwmon_data *data = dev_get_drvdata(dev); switch (attr) { case hwmon_temp_max: return nvme_set_temp_thresh(data->ctrl, channel, false, val); case hwmon_temp_min: return nvme_set_temp_thresh(data->ctrl, channel, true, val); default: break; } return -EOPNOTSUPP; } static const char * const nvme_hwmon_sensor_names[] = { "Composite", "Sensor 1", "Sensor 2", "Sensor 3", "Sensor 4", "Sensor 5", "Sensor 6", "Sensor 7", "Sensor 8", }; static int nvme_hwmon_read_string(struct device *dev, enum hwmon_sensor_types type, u32 attr, int channel, const char **str) { *str = nvme_hwmon_sensor_names[channel]; return 0; } static umode_t nvme_hwmon_is_visible(const void *_data, enum hwmon_sensor_types type, u32 attr, int channel) { const struct nvme_hwmon_data *data = _data; switch (attr) { case hwmon_temp_crit: if (!channel && data->ctrl->cctemp) return 0444; break; case hwmon_temp_max: case hwmon_temp_min: if ((!channel && data->ctrl->wctemp) || (channel && data->log->temp_sensor[channel - 
1] && !(data->ctrl->quirks & NVME_QUIRK_NO_SECONDARY_TEMP_THRESH))) { if (data->ctrl->quirks & NVME_QUIRK_NO_TEMP_THRESH_CHANGE) return 0444; return 0644; } break; case hwmon_temp_alarm: if (!channel) return 0444; break; case hwmon_temp_input: case hwmon_temp_label: if (!channel || data->log->temp_sensor[channel - 1]) return 0444; break; default: break; } return 0; } static const struct hwmon_channel_info *const nvme_hwmon_info[] = { HWMON_CHANNEL_INFO(chip, HWMON_C_REGISTER_TZ), HWMON_CHANNEL_INFO(temp, HWMON_T_INPUT | HWMON_T_MAX | HWMON_T_MIN | HWMON_T_CRIT | HWMON_T_LABEL | HWMON_T_ALARM, HWMON_T_INPUT | HWMON_T_MAX | HWMON_T_MIN | HWMON_T_LABEL, HWMON_T_INPUT | HWMON_T_MAX | HWMON_T_MIN | HWMON_T_LABEL, HWMON_T_INPUT | HWMON_T_MAX | HWMON_T_MIN | HWMON_T_LABEL, HWMON_T_INPUT | HWMON_T_MAX | HWMON_T_MIN | HWMON_T_LABEL, HWMON_T_INPUT | HWMON_T_MAX | HWMON_T_MIN | HWMON_T_LABEL, HWMON_T_INPUT | HWMON_T_MAX | HWMON_T_MIN | HWMON_T_LABEL, HWMON_T_INPUT | HWMON_T_MAX | HWMON_T_MIN | HWMON_T_LABEL, HWMON_T_INPUT | HWMON_T_MAX | HWMON_T_MIN | HWMON_T_LABEL), NULL }; static const struct hwmon_ops nvme_hwmon_ops = { .is_visible = nvme_hwmon_is_visible, .read = nvme_hwmon_read, .read_string = nvme_hwmon_read_string, .write = nvme_hwmon_write, }; static const struct hwmon_chip_info nvme_hwmon_chip_info = { .ops = &nvme_hwmon_ops, .info = nvme_hwmon_info, }; int nvme_hwmon_init(struct nvme_ctrl *ctrl) { struct device *dev = ctrl->device; struct nvme_hwmon_data *data; struct device *hwmon; int err; data = kzalloc(sizeof(*data), GFP_KERNEL); if (!data) return -ENOMEM; data->log = kzalloc(sizeof(*data->log), GFP_KERNEL); if (!data->log) { err = -ENOMEM; goto err_free_data; } data->ctrl = ctrl; mutex_init(&data->read_lock); err = nvme_hwmon_get_smart_log(data); if (err) { dev_warn(dev, "Failed to read smart log (error %d)\n", err); goto err_free_log; } hwmon = hwmon_device_register_with_info(dev, "nvme", data, &nvme_hwmon_chip_info, NULL); if (IS_ERR(hwmon)) { dev_warn(dev, "Failed to instantiate hwmon device\n"); err = PTR_ERR(hwmon); goto err_free_log; } ctrl->hwmon_device = hwmon; return 0; err_free_log: kfree(data->log); err_free_data: kfree(data); return err; } void nvme_hwmon_exit(struct nvme_ctrl *ctrl) { if (ctrl->hwmon_device) { struct nvme_hwmon_data *data = dev_get_drvdata(ctrl->hwmon_device); hwmon_device_unregister(ctrl->hwmon_device); ctrl->hwmon_device = NULL; kfree(data->log); kfree(data); } }
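/*
 * Editor's sketch (not driver code): how nvme_set_temp_thresh() above packs
 * the Temperature Threshold feature dword.  The low 16 bits carry the
 * threshold in kelvin, the sensor select sits above them, and a flag marks
 * an "under" threshold.  The bit positions and the integer-kelvin
 * conversion below mirror what the driver relies on but are restated here
 * as assumptions for illustration.
 */
#include <stdio.h>
#include <stdint.h>

#define DEMO_TEMP_THRESH_MASK		0xffffu
#define DEMO_TEMP_THRESH_SELECT_SHIFT	16
#define DEMO_TEMP_THRESH_TYPE_UNDER	0x100000u

static unsigned int demo_millicelsius_to_kelvin(long mc)
{
	return (unsigned int)((mc + 273150 + 500) / 1000);	/* round to nearest */
}

static uint32_t demo_pack_thresh(int sensor, int under, long millicelsius)
{
	uint32_t dword = (uint32_t)sensor << DEMO_TEMP_THRESH_SELECT_SHIFT;
	unsigned int kelvin = demo_millicelsius_to_kelvin(millicelsius);

	if (kelvin > DEMO_TEMP_THRESH_MASK)
		kelvin = DEMO_TEMP_THRESH_MASK;
	if (under)
		dword |= DEMO_TEMP_THRESH_TYPE_UNDER;
	return dword | kelvin;
}

int main(void)
{
	/* 80 C over-temperature threshold on the composite sensor (0). */
	printf("sensor 0, over, 80 C -> 0x%08x\n", demo_pack_thresh(0, 0, 80000));
	/* 5 C under-temperature threshold on temperature sensor 2. */
	printf("sensor 2, under, 5 C -> 0x%08x\n", demo_pack_thresh(2, 1, 5000));
	/* Reading back: a raw field of 323 K is 49850 millidegrees C. */
	printf("323 K -> %ld millidegrees C\n", 323L * 1000 - 273150);
	return 0;
}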
linux-master
drivers/nvme/host/hwmon.c
// SPDX-License-Identifier: GPL-2.0 /* * Copyright (C) 2020 Western Digital Corporation or its affiliates. */ #include <linux/blkdev.h> #include <linux/vmalloc.h> #include "nvme.h" int nvme_revalidate_zones(struct nvme_ns *ns) { struct request_queue *q = ns->queue; blk_queue_chunk_sectors(q, ns->zsze); blk_queue_max_zone_append_sectors(q, ns->ctrl->max_zone_append); return blk_revalidate_disk_zones(ns->disk, NULL); } static int nvme_set_max_append(struct nvme_ctrl *ctrl) { struct nvme_command c = { }; struct nvme_id_ctrl_zns *id; int status; id = kzalloc(sizeof(*id), GFP_KERNEL); if (!id) return -ENOMEM; c.identify.opcode = nvme_admin_identify; c.identify.cns = NVME_ID_CNS_CS_CTRL; c.identify.csi = NVME_CSI_ZNS; status = nvme_submit_sync_cmd(ctrl->admin_q, &c, id, sizeof(*id)); if (status) { kfree(id); return status; } if (id->zasl) ctrl->max_zone_append = 1 << (id->zasl + 3); else ctrl->max_zone_append = ctrl->max_hw_sectors; kfree(id); return 0; } int nvme_update_zone_info(struct nvme_ns *ns, unsigned lbaf) { struct nvme_effects_log *log = ns->head->effects; struct request_queue *q = ns->queue; struct nvme_command c = { }; struct nvme_id_ns_zns *id; int status; /* Driver requires zone append support */ if ((le32_to_cpu(log->iocs[nvme_cmd_zone_append]) & NVME_CMD_EFFECTS_CSUPP)) { if (test_and_clear_bit(NVME_NS_FORCE_RO, &ns->flags)) dev_warn(ns->ctrl->device, "Zone Append supported for zoned namespace:%d. Remove read-only mode\n", ns->head->ns_id); } else { set_bit(NVME_NS_FORCE_RO, &ns->flags); dev_warn(ns->ctrl->device, "Zone Append not supported for zoned namespace:%d. Forcing to read-only mode\n", ns->head->ns_id); } /* Lazily query controller append limit for the first zoned namespace */ if (!ns->ctrl->max_zone_append) { status = nvme_set_max_append(ns->ctrl); if (status) return status; } id = kzalloc(sizeof(*id), GFP_KERNEL); if (!id) return -ENOMEM; c.identify.opcode = nvme_admin_identify; c.identify.nsid = cpu_to_le32(ns->head->ns_id); c.identify.cns = NVME_ID_CNS_CS_NS; c.identify.csi = NVME_CSI_ZNS; status = nvme_submit_sync_cmd(ns->ctrl->admin_q, &c, id, sizeof(*id)); if (status) goto free_data; /* * We currently do not handle devices requiring any of the zoned * operation characteristics. 
*/ if (id->zoc) { dev_warn(ns->ctrl->device, "zone operations:%x not supported for namespace:%u\n", le16_to_cpu(id->zoc), ns->head->ns_id); status = -ENODEV; goto free_data; } ns->zsze = nvme_lba_to_sect(ns, le64_to_cpu(id->lbafe[lbaf].zsze)); if (!is_power_of_2(ns->zsze)) { dev_warn(ns->ctrl->device, "invalid zone size:%llu for namespace:%u\n", ns->zsze, ns->head->ns_id); status = -ENODEV; goto free_data; } disk_set_zoned(ns->disk, BLK_ZONED_HM); blk_queue_flag_set(QUEUE_FLAG_ZONE_RESETALL, q); disk_set_max_open_zones(ns->disk, le32_to_cpu(id->mor) + 1); disk_set_max_active_zones(ns->disk, le32_to_cpu(id->mar) + 1); free_data: kfree(id); return status; } static void *nvme_zns_alloc_report_buffer(struct nvme_ns *ns, unsigned int nr_zones, size_t *buflen) { struct request_queue *q = ns->disk->queue; size_t bufsize; void *buf; const size_t min_bufsize = sizeof(struct nvme_zone_report) + sizeof(struct nvme_zone_descriptor); nr_zones = min_t(unsigned int, nr_zones, get_capacity(ns->disk) >> ilog2(ns->zsze)); bufsize = sizeof(struct nvme_zone_report) + nr_zones * sizeof(struct nvme_zone_descriptor); bufsize = min_t(size_t, bufsize, queue_max_hw_sectors(q) << SECTOR_SHIFT); bufsize = min_t(size_t, bufsize, queue_max_segments(q) << PAGE_SHIFT); while (bufsize >= min_bufsize) { buf = __vmalloc(bufsize, GFP_KERNEL | __GFP_NORETRY); if (buf) { *buflen = bufsize; return buf; } bufsize >>= 1; } return NULL; } static int nvme_zone_parse_entry(struct nvme_ns *ns, struct nvme_zone_descriptor *entry, unsigned int idx, report_zones_cb cb, void *data) { struct blk_zone zone = { }; if ((entry->zt & 0xf) != NVME_ZONE_TYPE_SEQWRITE_REQ) { dev_err(ns->ctrl->device, "invalid zone type %#x\n", entry->zt); return -EINVAL; } zone.type = BLK_ZONE_TYPE_SEQWRITE_REQ; zone.cond = entry->zs >> 4; zone.len = ns->zsze; zone.capacity = nvme_lba_to_sect(ns, le64_to_cpu(entry->zcap)); zone.start = nvme_lba_to_sect(ns, le64_to_cpu(entry->zslba)); if (zone.cond == BLK_ZONE_COND_FULL) zone.wp = zone.start + zone.len; else zone.wp = nvme_lba_to_sect(ns, le64_to_cpu(entry->wp)); return cb(&zone, idx, data); } int nvme_ns_report_zones(struct nvme_ns *ns, sector_t sector, unsigned int nr_zones, report_zones_cb cb, void *data) { struct nvme_zone_report *report; struct nvme_command c = { }; int ret, zone_idx = 0; unsigned int nz, i; size_t buflen; if (ns->head->ids.csi != NVME_CSI_ZNS) return -EINVAL; report = nvme_zns_alloc_report_buffer(ns, nr_zones, &buflen); if (!report) return -ENOMEM; c.zmr.opcode = nvme_cmd_zone_mgmt_recv; c.zmr.nsid = cpu_to_le32(ns->head->ns_id); c.zmr.numd = cpu_to_le32(nvme_bytes_to_numd(buflen)); c.zmr.zra = NVME_ZRA_ZONE_REPORT; c.zmr.zrasf = NVME_ZRASF_ZONE_REPORT_ALL; c.zmr.pr = NVME_REPORT_ZONE_PARTIAL; sector &= ~(ns->zsze - 1); while (zone_idx < nr_zones && sector < get_capacity(ns->disk)) { memset(report, 0, buflen); c.zmr.slba = cpu_to_le64(nvme_sect_to_lba(ns, sector)); ret = nvme_submit_sync_cmd(ns->queue, &c, report, buflen); if (ret) { if (ret > 0) ret = -EIO; goto out_free; } nz = min((unsigned int)le64_to_cpu(report->nr_zones), nr_zones); if (!nz) break; for (i = 0; i < nz && zone_idx < nr_zones; i++) { ret = nvme_zone_parse_entry(ns, &report->entries[i], zone_idx, cb, data); if (ret) goto out_free; zone_idx++; } sector += ns->zsze * nz; } if (zone_idx > 0) ret = zone_idx; else ret = -EINVAL; out_free: kvfree(report); return ret; } blk_status_t nvme_setup_zone_mgmt_send(struct nvme_ns *ns, struct request *req, struct nvme_command *c, enum nvme_zone_mgmt_action action) { memset(c, 0, 
       sizeof(*c));
	c->zms.opcode = nvme_cmd_zone_mgmt_send;
	c->zms.nsid = cpu_to_le32(ns->head->ns_id);
	c->zms.slba = cpu_to_le64(nvme_sect_to_lba(ns, blk_rq_pos(req)));
	c->zms.zsa = action;

	if (req_op(req) == REQ_OP_ZONE_RESET_ALL)
		c->zms.select_all = 1;

	return BLK_STS_OK;
}
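/*
 * Editor's sketch (not driver code): the shrinking-allocation strategy used
 * by nvme_zns_alloc_report_buffer() above.  It asks for enough room for the
 * whole report, then keeps halving the request on allocation failure until
 * it would no longer fit a header plus a single zone descriptor.  The sizes
 * here are stand-ins; the kernel uses vmalloc and real queue limits.
 */
#include <stdio.h>
#include <stdlib.h>

#define DEMO_HDR_SIZE	64u	/* stand-in for struct nvme_zone_report */
#define DEMO_DESC_SIZE	64u	/* stand-in for struct nvme_zone_descriptor */

static void *demo_alloc_report(unsigned int nr_zones, size_t *buflen)
{
	const size_t min_bufsize = DEMO_HDR_SIZE + DEMO_DESC_SIZE;
	size_t bufsize = DEMO_HDR_SIZE + (size_t)nr_zones * DEMO_DESC_SIZE;
	void *buf;

	while (bufsize >= min_bufsize) {
		buf = malloc(bufsize);
		if (buf) {
			*buflen = bufsize;
			return buf;
		}
		bufsize >>= 1;	/* back off and retry with half the size */
	}
	return NULL;
}

int main(void)
{
	size_t buflen = 0;
	void *buf = demo_alloc_report(1024, &buflen);

	if (buf) {
		printf("got a %zu-byte report buffer\n", buflen);
		free(buf);
	}
	return 0;
}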
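/*
 * Editor's sketch (not driver code): the zone geometry math used by the ZNS
 * code above.  Because nvme_update_zone_info() rejects zone sizes that are
 * not a power of two, the report path can align a start sector down to a
 * zone boundary with a mask and derive the zone count with a shift.  The
 * numbers below are illustrative, not taken from real hardware.
 */
#include <stdio.h>
#include <stdint.h>

static int demo_is_pow2(uint64_t x)
{
	return x && !(x & (x - 1));
}

static unsigned int demo_ilog2(uint64_t x)
{
	unsigned int log = 0;

	while (x >>= 1)
		log++;
	return log;
}

int main(void)
{
	const uint64_t zsze = 524288;		/* zone size in 512-byte sectors (256 MiB) */
	const uint64_t capacity = 7340032;	/* disk capacity in sectors */
	uint64_t sector = 1000000;		/* arbitrary starting sector */

	if (!demo_is_pow2(zsze)) {
		fprintf(stderr, "zone size must be a power of two\n");
		return 1;
	}

	printf("zones on disk: %llu\n",
	       (unsigned long long)(capacity >> demo_ilog2(zsze)));
	printf("sector %llu aligns down to zone start %llu\n",
	       (unsigned long long)sector,
	       (unsigned long long)(sector & ~(zsze - 1)));
	return 0;
}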
linux-master
drivers/nvme/host/zns.c
// SPDX-License-Identifier: GPL-2.0 /* * NVM Express device driver * Copyright (c) 2011-2014, Intel Corporation. */ #include <linux/blkdev.h> #include <linux/blk-mq.h> #include <linux/blk-integrity.h> #include <linux/compat.h> #include <linux/delay.h> #include <linux/errno.h> #include <linux/hdreg.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/backing-dev.h> #include <linux/slab.h> #include <linux/types.h> #include <linux/pr.h> #include <linux/ptrace.h> #include <linux/nvme_ioctl.h> #include <linux/pm_qos.h> #include <asm/unaligned.h> #include "nvme.h" #include "fabrics.h" #include <linux/nvme-auth.h> #define CREATE_TRACE_POINTS #include "trace.h" #define NVME_MINORS (1U << MINORBITS) struct nvme_ns_info { struct nvme_ns_ids ids; u32 nsid; __le32 anagrpid; bool is_shared; bool is_readonly; bool is_ready; bool is_removed; }; unsigned int admin_timeout = 60; module_param(admin_timeout, uint, 0644); MODULE_PARM_DESC(admin_timeout, "timeout in seconds for admin commands"); EXPORT_SYMBOL_GPL(admin_timeout); unsigned int nvme_io_timeout = 30; module_param_named(io_timeout, nvme_io_timeout, uint, 0644); MODULE_PARM_DESC(io_timeout, "timeout in seconds for I/O"); EXPORT_SYMBOL_GPL(nvme_io_timeout); static unsigned char shutdown_timeout = 5; module_param(shutdown_timeout, byte, 0644); MODULE_PARM_DESC(shutdown_timeout, "timeout in seconds for controller shutdown"); static u8 nvme_max_retries = 5; module_param_named(max_retries, nvme_max_retries, byte, 0644); MODULE_PARM_DESC(max_retries, "max number of retries a command may have"); static unsigned long default_ps_max_latency_us = 100000; module_param(default_ps_max_latency_us, ulong, 0644); MODULE_PARM_DESC(default_ps_max_latency_us, "max power saving latency for new devices; use PM QOS to change per device"); static bool force_apst; module_param(force_apst, bool, 0644); MODULE_PARM_DESC(force_apst, "allow APST for newly enumerated devices even if quirked off"); static unsigned long apst_primary_timeout_ms = 100; module_param(apst_primary_timeout_ms, ulong, 0644); MODULE_PARM_DESC(apst_primary_timeout_ms, "primary APST timeout in ms"); static unsigned long apst_secondary_timeout_ms = 2000; module_param(apst_secondary_timeout_ms, ulong, 0644); MODULE_PARM_DESC(apst_secondary_timeout_ms, "secondary APST timeout in ms"); static unsigned long apst_primary_latency_tol_us = 15000; module_param(apst_primary_latency_tol_us, ulong, 0644); MODULE_PARM_DESC(apst_primary_latency_tol_us, "primary APST latency tolerance in us"); static unsigned long apst_secondary_latency_tol_us = 100000; module_param(apst_secondary_latency_tol_us, ulong, 0644); MODULE_PARM_DESC(apst_secondary_latency_tol_us, "secondary APST latency tolerance in us"); /* * nvme_wq - hosts nvme related works that are not reset or delete * nvme_reset_wq - hosts nvme reset works * nvme_delete_wq - hosts nvme delete works * * nvme_wq will host works such as scan, aen handling, fw activation, * keep-alive, periodic reconnects etc. nvme_reset_wq * runs reset works which also flush works hosted on nvme_wq for * serialization purposes. nvme_delete_wq host controller deletion * works which flush reset works for serialization. 
*/ struct workqueue_struct *nvme_wq; EXPORT_SYMBOL_GPL(nvme_wq); struct workqueue_struct *nvme_reset_wq; EXPORT_SYMBOL_GPL(nvme_reset_wq); struct workqueue_struct *nvme_delete_wq; EXPORT_SYMBOL_GPL(nvme_delete_wq); static LIST_HEAD(nvme_subsystems); static DEFINE_MUTEX(nvme_subsystems_lock); static DEFINE_IDA(nvme_instance_ida); static dev_t nvme_ctrl_base_chr_devt; static struct class *nvme_class; static struct class *nvme_subsys_class; static DEFINE_IDA(nvme_ns_chr_minor_ida); static dev_t nvme_ns_chr_devt; static struct class *nvme_ns_chr_class; static void nvme_put_subsystem(struct nvme_subsystem *subsys); static void nvme_remove_invalid_namespaces(struct nvme_ctrl *ctrl, unsigned nsid); static void nvme_update_keep_alive(struct nvme_ctrl *ctrl, struct nvme_command *cmd); void nvme_queue_scan(struct nvme_ctrl *ctrl) { /* * Only new queue scan work when admin and IO queues are both alive */ if (ctrl->state == NVME_CTRL_LIVE && ctrl->tagset) queue_work(nvme_wq, &ctrl->scan_work); } /* * Use this function to proceed with scheduling reset_work for a controller * that had previously been set to the resetting state. This is intended for * code paths that can't be interrupted by other reset attempts. A hot removal * may prevent this from succeeding. */ int nvme_try_sched_reset(struct nvme_ctrl *ctrl) { if (ctrl->state != NVME_CTRL_RESETTING) return -EBUSY; if (!queue_work(nvme_reset_wq, &ctrl->reset_work)) return -EBUSY; return 0; } EXPORT_SYMBOL_GPL(nvme_try_sched_reset); static void nvme_failfast_work(struct work_struct *work) { struct nvme_ctrl *ctrl = container_of(to_delayed_work(work), struct nvme_ctrl, failfast_work); if (ctrl->state != NVME_CTRL_CONNECTING) return; set_bit(NVME_CTRL_FAILFAST_EXPIRED, &ctrl->flags); dev_info(ctrl->device, "failfast expired\n"); nvme_kick_requeue_lists(ctrl); } static inline void nvme_start_failfast_work(struct nvme_ctrl *ctrl) { if (!ctrl->opts || ctrl->opts->fast_io_fail_tmo == -1) return; schedule_delayed_work(&ctrl->failfast_work, ctrl->opts->fast_io_fail_tmo * HZ); } static inline void nvme_stop_failfast_work(struct nvme_ctrl *ctrl) { if (!ctrl->opts) return; cancel_delayed_work_sync(&ctrl->failfast_work); clear_bit(NVME_CTRL_FAILFAST_EXPIRED, &ctrl->flags); } int nvme_reset_ctrl(struct nvme_ctrl *ctrl) { if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_RESETTING)) return -EBUSY; if (!queue_work(nvme_reset_wq, &ctrl->reset_work)) return -EBUSY; return 0; } EXPORT_SYMBOL_GPL(nvme_reset_ctrl); int nvme_reset_ctrl_sync(struct nvme_ctrl *ctrl) { int ret; ret = nvme_reset_ctrl(ctrl); if (!ret) { flush_work(&ctrl->reset_work); if (ctrl->state != NVME_CTRL_LIVE) ret = -ENETRESET; } return ret; } static void nvme_do_delete_ctrl(struct nvme_ctrl *ctrl) { dev_info(ctrl->device, "Removing ctrl: NQN \"%s\"\n", nvmf_ctrl_subsysnqn(ctrl)); flush_work(&ctrl->reset_work); nvme_stop_ctrl(ctrl); nvme_remove_namespaces(ctrl); ctrl->ops->delete_ctrl(ctrl); nvme_uninit_ctrl(ctrl); } static void nvme_delete_ctrl_work(struct work_struct *work) { struct nvme_ctrl *ctrl = container_of(work, struct nvme_ctrl, delete_work); nvme_do_delete_ctrl(ctrl); } int nvme_delete_ctrl(struct nvme_ctrl *ctrl) { if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_DELETING)) return -EBUSY; if (!queue_work(nvme_delete_wq, &ctrl->delete_work)) return -EBUSY; return 0; } EXPORT_SYMBOL_GPL(nvme_delete_ctrl); void nvme_delete_ctrl_sync(struct nvme_ctrl *ctrl) { /* * Keep a reference until nvme_do_delete_ctrl() complete, * since ->delete_ctrl can free the controller. 
*/ nvme_get_ctrl(ctrl); if (nvme_change_ctrl_state(ctrl, NVME_CTRL_DELETING)) nvme_do_delete_ctrl(ctrl); nvme_put_ctrl(ctrl); } static blk_status_t nvme_error_status(u16 status) { switch (status & 0x7ff) { case NVME_SC_SUCCESS: return BLK_STS_OK; case NVME_SC_CAP_EXCEEDED: return BLK_STS_NOSPC; case NVME_SC_LBA_RANGE: case NVME_SC_CMD_INTERRUPTED: case NVME_SC_NS_NOT_READY: return BLK_STS_TARGET; case NVME_SC_BAD_ATTRIBUTES: case NVME_SC_ONCS_NOT_SUPPORTED: case NVME_SC_INVALID_OPCODE: case NVME_SC_INVALID_FIELD: case NVME_SC_INVALID_NS: return BLK_STS_NOTSUPP; case NVME_SC_WRITE_FAULT: case NVME_SC_READ_ERROR: case NVME_SC_UNWRITTEN_BLOCK: case NVME_SC_ACCESS_DENIED: case NVME_SC_READ_ONLY: case NVME_SC_COMPARE_FAILED: return BLK_STS_MEDIUM; case NVME_SC_GUARD_CHECK: case NVME_SC_APPTAG_CHECK: case NVME_SC_REFTAG_CHECK: case NVME_SC_INVALID_PI: return BLK_STS_PROTECTION; case NVME_SC_RESERVATION_CONFLICT: return BLK_STS_RESV_CONFLICT; case NVME_SC_HOST_PATH_ERROR: return BLK_STS_TRANSPORT; case NVME_SC_ZONE_TOO_MANY_ACTIVE: return BLK_STS_ZONE_ACTIVE_RESOURCE; case NVME_SC_ZONE_TOO_MANY_OPEN: return BLK_STS_ZONE_OPEN_RESOURCE; default: return BLK_STS_IOERR; } } static void nvme_retry_req(struct request *req) { unsigned long delay = 0; u16 crd; /* The mask and shift result must be <= 3 */ crd = (nvme_req(req)->status & NVME_SC_CRD) >> 11; if (crd) delay = nvme_req(req)->ctrl->crdt[crd - 1] * 100; nvme_req(req)->retries++; blk_mq_requeue_request(req, false); blk_mq_delay_kick_requeue_list(req->q, delay); } static void nvme_log_error(struct request *req) { struct nvme_ns *ns = req->q->queuedata; struct nvme_request *nr = nvme_req(req); if (ns) { pr_err_ratelimited("%s: %s(0x%x) @ LBA %llu, %llu blocks, %s (sct 0x%x / sc 0x%x) %s%s\n", ns->disk ? ns->disk->disk_name : "?", nvme_get_opcode_str(nr->cmd->common.opcode), nr->cmd->common.opcode, (unsigned long long)nvme_sect_to_lba(ns, blk_rq_pos(req)), (unsigned long long)blk_rq_bytes(req) >> ns->lba_shift, nvme_get_error_status_str(nr->status), nr->status >> 8 & 7, /* Status Code Type */ nr->status & 0xff, /* Status Code */ nr->status & NVME_SC_MORE ? "MORE " : "", nr->status & NVME_SC_DNR ? "DNR " : ""); return; } pr_err_ratelimited("%s: %s(0x%x), %s (sct 0x%x / sc 0x%x) %s%s\n", dev_name(nr->ctrl->device), nvme_get_admin_opcode_str(nr->cmd->common.opcode), nr->cmd->common.opcode, nvme_get_error_status_str(nr->status), nr->status >> 8 & 7, /* Status Code Type */ nr->status & 0xff, /* Status Code */ nr->status & NVME_SC_MORE ? "MORE " : "", nr->status & NVME_SC_DNR ? 
"DNR " : ""); } enum nvme_disposition { COMPLETE, RETRY, FAILOVER, AUTHENTICATE, }; static inline enum nvme_disposition nvme_decide_disposition(struct request *req) { if (likely(nvme_req(req)->status == 0)) return COMPLETE; if ((nvme_req(req)->status & 0x7ff) == NVME_SC_AUTH_REQUIRED) return AUTHENTICATE; if (blk_noretry_request(req) || (nvme_req(req)->status & NVME_SC_DNR) || nvme_req(req)->retries >= nvme_max_retries) return COMPLETE; if (req->cmd_flags & REQ_NVME_MPATH) { if (nvme_is_path_error(nvme_req(req)->status) || blk_queue_dying(req->q)) return FAILOVER; } else { if (blk_queue_dying(req->q)) return COMPLETE; } return RETRY; } static inline void nvme_end_req_zoned(struct request *req) { if (IS_ENABLED(CONFIG_BLK_DEV_ZONED) && req_op(req) == REQ_OP_ZONE_APPEND) req->__sector = nvme_lba_to_sect(req->q->queuedata, le64_to_cpu(nvme_req(req)->result.u64)); } static inline void nvme_end_req(struct request *req) { blk_status_t status = nvme_error_status(nvme_req(req)->status); if (unlikely(nvme_req(req)->status && !(req->rq_flags & RQF_QUIET))) nvme_log_error(req); nvme_end_req_zoned(req); nvme_trace_bio_complete(req); if (req->cmd_flags & REQ_NVME_MPATH) nvme_mpath_end_request(req); blk_mq_end_request(req, status); } void nvme_complete_rq(struct request *req) { struct nvme_ctrl *ctrl = nvme_req(req)->ctrl; trace_nvme_complete_rq(req); nvme_cleanup_cmd(req); /* * Completions of long-running commands should not be able to * defer sending of periodic keep alives, since the controller * may have completed processing such commands a long time ago * (arbitrarily close to command submission time). * req->deadline - req->timeout is the command submission time * in jiffies. */ if (ctrl->kas && req->deadline - req->timeout >= ctrl->ka_last_check_time) ctrl->comp_seen = true; switch (nvme_decide_disposition(req)) { case COMPLETE: nvme_end_req(req); return; case RETRY: nvme_retry_req(req); return; case FAILOVER: nvme_failover_req(req); return; case AUTHENTICATE: #ifdef CONFIG_NVME_AUTH queue_work(nvme_wq, &ctrl->dhchap_auth_work); nvme_retry_req(req); #else nvme_end_req(req); #endif return; } } EXPORT_SYMBOL_GPL(nvme_complete_rq); void nvme_complete_batch_req(struct request *req) { trace_nvme_complete_rq(req); nvme_cleanup_cmd(req); nvme_end_req_zoned(req); } EXPORT_SYMBOL_GPL(nvme_complete_batch_req); /* * Called to unwind from ->queue_rq on a failed command submission so that the * multipathing code gets called to potentially failover to another path. * The caller needs to unwind all transport specific resource allocations and * must return propagate the return value. 
*/ blk_status_t nvme_host_path_error(struct request *req) { nvme_req(req)->status = NVME_SC_HOST_PATH_ERROR; blk_mq_set_request_complete(req); nvme_complete_rq(req); return BLK_STS_OK; } EXPORT_SYMBOL_GPL(nvme_host_path_error); bool nvme_cancel_request(struct request *req, void *data) { dev_dbg_ratelimited(((struct nvme_ctrl *) data)->device, "Cancelling I/O %d", req->tag); /* don't abort one completed or idle request */ if (blk_mq_rq_state(req) != MQ_RQ_IN_FLIGHT) return true; nvme_req(req)->status = NVME_SC_HOST_ABORTED_CMD; nvme_req(req)->flags |= NVME_REQ_CANCELLED; blk_mq_complete_request(req); return true; } EXPORT_SYMBOL_GPL(nvme_cancel_request); void nvme_cancel_tagset(struct nvme_ctrl *ctrl) { if (ctrl->tagset) { blk_mq_tagset_busy_iter(ctrl->tagset, nvme_cancel_request, ctrl); blk_mq_tagset_wait_completed_request(ctrl->tagset); } } EXPORT_SYMBOL_GPL(nvme_cancel_tagset); void nvme_cancel_admin_tagset(struct nvme_ctrl *ctrl) { if (ctrl->admin_tagset) { blk_mq_tagset_busy_iter(ctrl->admin_tagset, nvme_cancel_request, ctrl); blk_mq_tagset_wait_completed_request(ctrl->admin_tagset); } } EXPORT_SYMBOL_GPL(nvme_cancel_admin_tagset); bool nvme_change_ctrl_state(struct nvme_ctrl *ctrl, enum nvme_ctrl_state new_state) { enum nvme_ctrl_state old_state; unsigned long flags; bool changed = false; spin_lock_irqsave(&ctrl->lock, flags); old_state = ctrl->state; switch (new_state) { case NVME_CTRL_LIVE: switch (old_state) { case NVME_CTRL_NEW: case NVME_CTRL_RESETTING: case NVME_CTRL_CONNECTING: changed = true; fallthrough; default: break; } break; case NVME_CTRL_RESETTING: switch (old_state) { case NVME_CTRL_NEW: case NVME_CTRL_LIVE: changed = true; fallthrough; default: break; } break; case NVME_CTRL_CONNECTING: switch (old_state) { case NVME_CTRL_NEW: case NVME_CTRL_RESETTING: changed = true; fallthrough; default: break; } break; case NVME_CTRL_DELETING: switch (old_state) { case NVME_CTRL_LIVE: case NVME_CTRL_RESETTING: case NVME_CTRL_CONNECTING: changed = true; fallthrough; default: break; } break; case NVME_CTRL_DELETING_NOIO: switch (old_state) { case NVME_CTRL_DELETING: case NVME_CTRL_DEAD: changed = true; fallthrough; default: break; } break; case NVME_CTRL_DEAD: switch (old_state) { case NVME_CTRL_DELETING: changed = true; fallthrough; default: break; } break; default: break; } if (changed) { ctrl->state = new_state; wake_up_all(&ctrl->state_wq); } spin_unlock_irqrestore(&ctrl->lock, flags); if (!changed) return false; if (ctrl->state == NVME_CTRL_LIVE) { if (old_state == NVME_CTRL_CONNECTING) nvme_stop_failfast_work(ctrl); nvme_kick_requeue_lists(ctrl); } else if (ctrl->state == NVME_CTRL_CONNECTING && old_state == NVME_CTRL_RESETTING) { nvme_start_failfast_work(ctrl); } return changed; } EXPORT_SYMBOL_GPL(nvme_change_ctrl_state); /* * Returns true for sink states that can't ever transition back to live. */ static bool nvme_state_terminal(struct nvme_ctrl *ctrl) { switch (ctrl->state) { case NVME_CTRL_NEW: case NVME_CTRL_LIVE: case NVME_CTRL_RESETTING: case NVME_CTRL_CONNECTING: return false; case NVME_CTRL_DELETING: case NVME_CTRL_DELETING_NOIO: case NVME_CTRL_DEAD: return true; default: WARN_ONCE(1, "Unhandled ctrl state:%d", ctrl->state); return true; } } /* * Waits for the controller state to be resetting, or returns false if it is * not possible to ever transition to that state. 
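 *
 * Hedged usage sketch (assumed caller, not copied from this file):
 *
 *	if (nvme_wait_reset(ctrl))
 *		nvme_try_sched_reset(ctrl);	// now in RESETTING, queue the work
 *	else
 *		return;				// terminal state, give up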
 */
bool nvme_wait_reset(struct nvme_ctrl *ctrl)
{
	wait_event(ctrl->state_wq,
		   nvme_change_ctrl_state(ctrl, NVME_CTRL_RESETTING) ||
		   nvme_state_terminal(ctrl));
	return ctrl->state == NVME_CTRL_RESETTING;
}
EXPORT_SYMBOL_GPL(nvme_wait_reset);

static void nvme_free_ns_head(struct kref *ref)
{
	struct nvme_ns_head *head =
		container_of(ref, struct nvme_ns_head, ref);

	nvme_mpath_remove_disk(head);
	ida_free(&head->subsys->ns_ida, head->instance);
	cleanup_srcu_struct(&head->srcu);
	nvme_put_subsystem(head->subsys);
	kfree(head);
}

bool nvme_tryget_ns_head(struct nvme_ns_head *head)
{
	return kref_get_unless_zero(&head->ref);
}

void nvme_put_ns_head(struct nvme_ns_head *head)
{
	kref_put(&head->ref, nvme_free_ns_head);
}

static void nvme_free_ns(struct kref *kref)
{
	struct nvme_ns *ns = container_of(kref, struct nvme_ns, kref);

	put_disk(ns->disk);
	nvme_put_ns_head(ns->head);
	nvme_put_ctrl(ns->ctrl);
	kfree(ns);
}

static inline bool nvme_get_ns(struct nvme_ns *ns)
{
	return kref_get_unless_zero(&ns->kref);
}

void nvme_put_ns(struct nvme_ns *ns)
{
	kref_put(&ns->kref, nvme_free_ns);
}
EXPORT_SYMBOL_NS_GPL(nvme_put_ns, NVME_TARGET_PASSTHRU);

static inline void nvme_clear_nvme_request(struct request *req)
{
	nvme_req(req)->status = 0;
	nvme_req(req)->retries = 0;
	nvme_req(req)->flags = 0;
	req->rq_flags |= RQF_DONTPREP;
}

/* initialize a passthrough request */
void nvme_init_request(struct request *req, struct nvme_command *cmd)
{
	if (req->q->queuedata)
		req->timeout = NVME_IO_TIMEOUT;
	else /* no queuedata implies admin queue */
		req->timeout = NVME_ADMIN_TIMEOUT;

	/* passthru commands should let the driver set the SGL flags */
	cmd->common.flags &= ~NVME_CMD_SGL_ALL;

	req->cmd_flags |= REQ_FAILFAST_DRIVER;
	if (req->mq_hctx->type == HCTX_TYPE_POLL)
		req->cmd_flags |= REQ_POLLED;
	nvme_clear_nvme_request(req);
	req->rq_flags |= RQF_QUIET;
	memcpy(nvme_req(req)->cmd, cmd, sizeof(*cmd));
}
EXPORT_SYMBOL_GPL(nvme_init_request);

/*
 * For something we're not in a state to send to the device the default action
 * is to busy it and retry it after the controller state is recovered. However,
 * if the controller is deleting or if anything is marked for failfast or
 * nvme multipath it is immediately failed.
 *
 * Note: commands used to initialize the controller will be marked for failfast.
 * Note: nvme cli/ioctl commands are marked for failfast.
 */
blk_status_t nvme_fail_nonready_command(struct nvme_ctrl *ctrl,
		struct request *rq)
{
	if (ctrl->state != NVME_CTRL_DELETING_NOIO &&
	    ctrl->state != NVME_CTRL_DELETING &&
	    ctrl->state != NVME_CTRL_DEAD &&
	    !test_bit(NVME_CTRL_FAILFAST_EXPIRED, &ctrl->flags) &&
	    !blk_noretry_request(rq) && !(rq->cmd_flags & REQ_NVME_MPATH))
		return BLK_STS_RESOURCE;
	return nvme_host_path_error(rq);
}
EXPORT_SYMBOL_GPL(nvme_fail_nonready_command);

bool __nvme_check_ready(struct nvme_ctrl *ctrl, struct request *rq,
		bool queue_live)
{
	struct nvme_request *req = nvme_req(rq);

	/*
	 * Currently we have a problem sending passthru commands
	 * on the admin_q if the controller is not LIVE because we can't
	 * make sure that they are going out after the admin connect,
	 * controller enable and/or other commands in the initialization
	 * sequence. Until the controller is LIVE, fail with
	 * BLK_STS_RESOURCE so that they will be rescheduled.
	 */
	if (rq->q == ctrl->admin_q && (req->flags & NVME_REQ_USERCMD))
		return false;

	if (ctrl->ops->flags & NVME_F_FABRICS) {
		/*
		 * Only allow commands on a live queue, except for the connect
		 * command, which is required to set the queue live in the
		 * appropriate states.
*/ switch (ctrl->state) { case NVME_CTRL_CONNECTING: if (blk_rq_is_passthrough(rq) && nvme_is_fabrics(req->cmd) && (req->cmd->fabrics.fctype == nvme_fabrics_type_connect || req->cmd->fabrics.fctype == nvme_fabrics_type_auth_send || req->cmd->fabrics.fctype == nvme_fabrics_type_auth_receive)) return true; break; default: break; case NVME_CTRL_DEAD: return false; } } return queue_live; } EXPORT_SYMBOL_GPL(__nvme_check_ready); static inline void nvme_setup_flush(struct nvme_ns *ns, struct nvme_command *cmnd) { memset(cmnd, 0, sizeof(*cmnd)); cmnd->common.opcode = nvme_cmd_flush; cmnd->common.nsid = cpu_to_le32(ns->head->ns_id); } static blk_status_t nvme_setup_discard(struct nvme_ns *ns, struct request *req, struct nvme_command *cmnd) { unsigned short segments = blk_rq_nr_discard_segments(req), n = 0; struct nvme_dsm_range *range; struct bio *bio; /* * Some devices do not consider the DSM 'Number of Ranges' field when * determining how much data to DMA. Always allocate memory for maximum * number of segments to prevent device reading beyond end of buffer. */ static const size_t alloc_size = sizeof(*range) * NVME_DSM_MAX_RANGES; range = kzalloc(alloc_size, GFP_ATOMIC | __GFP_NOWARN); if (!range) { /* * If we fail allocation our range, fallback to the controller * discard page. If that's also busy, it's safe to return * busy, as we know we can make progress once that's freed. */ if (test_and_set_bit_lock(0, &ns->ctrl->discard_page_busy)) return BLK_STS_RESOURCE; range = page_address(ns->ctrl->discard_page); } if (queue_max_discard_segments(req->q) == 1) { u64 slba = nvme_sect_to_lba(ns, blk_rq_pos(req)); u32 nlb = blk_rq_sectors(req) >> (ns->lba_shift - 9); range[0].cattr = cpu_to_le32(0); range[0].nlb = cpu_to_le32(nlb); range[0].slba = cpu_to_le64(slba); n = 1; } else { __rq_for_each_bio(bio, req) { u64 slba = nvme_sect_to_lba(ns, bio->bi_iter.bi_sector); u32 nlb = bio->bi_iter.bi_size >> ns->lba_shift; if (n < segments) { range[n].cattr = cpu_to_le32(0); range[n].nlb = cpu_to_le32(nlb); range[n].slba = cpu_to_le64(slba); } n++; } } if (WARN_ON_ONCE(n != segments)) { if (virt_to_page(range) == ns->ctrl->discard_page) clear_bit_unlock(0, &ns->ctrl->discard_page_busy); else kfree(range); return BLK_STS_IOERR; } memset(cmnd, 0, sizeof(*cmnd)); cmnd->dsm.opcode = nvme_cmd_dsm; cmnd->dsm.nsid = cpu_to_le32(ns->head->ns_id); cmnd->dsm.nr = cpu_to_le32(segments - 1); cmnd->dsm.attributes = cpu_to_le32(NVME_DSMGMT_AD); bvec_set_virt(&req->special_vec, range, alloc_size); req->rq_flags |= RQF_SPECIAL_PAYLOAD; return BLK_STS_OK; } static void nvme_set_ref_tag(struct nvme_ns *ns, struct nvme_command *cmnd, struct request *req) { u32 upper, lower; u64 ref48; /* both rw and write zeroes share the same reftag format */ switch (ns->guard_type) { case NVME_NVM_NS_16B_GUARD: cmnd->rw.reftag = cpu_to_le32(t10_pi_ref_tag(req)); break; case NVME_NVM_NS_64B_GUARD: ref48 = ext_pi_ref_tag(req); lower = lower_32_bits(ref48); upper = upper_32_bits(ref48); cmnd->rw.reftag = cpu_to_le32(lower); cmnd->rw.cdw3 = cpu_to_le32(upper); break; default: break; } } static inline blk_status_t nvme_setup_write_zeroes(struct nvme_ns *ns, struct request *req, struct nvme_command *cmnd) { memset(cmnd, 0, sizeof(*cmnd)); if (ns->ctrl->quirks & NVME_QUIRK_DEALLOCATE_ZEROES) return nvme_setup_discard(ns, req, cmnd); cmnd->write_zeroes.opcode = nvme_cmd_write_zeroes; cmnd->write_zeroes.nsid = cpu_to_le32(ns->head->ns_id); cmnd->write_zeroes.slba = cpu_to_le64(nvme_sect_to_lba(ns, blk_rq_pos(req))); cmnd->write_zeroes.length = 
cpu_to_le16((blk_rq_bytes(req) >> ns->lba_shift) - 1); if (!(req->cmd_flags & REQ_NOUNMAP) && (ns->features & NVME_NS_DEAC)) cmnd->write_zeroes.control |= cpu_to_le16(NVME_WZ_DEAC); if (nvme_ns_has_pi(ns)) { cmnd->write_zeroes.control |= cpu_to_le16(NVME_RW_PRINFO_PRACT); switch (ns->pi_type) { case NVME_NS_DPS_PI_TYPE1: case NVME_NS_DPS_PI_TYPE2: nvme_set_ref_tag(ns, cmnd, req); break; } } return BLK_STS_OK; } static inline blk_status_t nvme_setup_rw(struct nvme_ns *ns, struct request *req, struct nvme_command *cmnd, enum nvme_opcode op) { u16 control = 0; u32 dsmgmt = 0; if (req->cmd_flags & REQ_FUA) control |= NVME_RW_FUA; if (req->cmd_flags & (REQ_FAILFAST_DEV | REQ_RAHEAD)) control |= NVME_RW_LR; if (req->cmd_flags & REQ_RAHEAD) dsmgmt |= NVME_RW_DSM_FREQ_PREFETCH; cmnd->rw.opcode = op; cmnd->rw.flags = 0; cmnd->rw.nsid = cpu_to_le32(ns->head->ns_id); cmnd->rw.cdw2 = 0; cmnd->rw.cdw3 = 0; cmnd->rw.metadata = 0; cmnd->rw.slba = cpu_to_le64(nvme_sect_to_lba(ns, blk_rq_pos(req))); cmnd->rw.length = cpu_to_le16((blk_rq_bytes(req) >> ns->lba_shift) - 1); cmnd->rw.reftag = 0; cmnd->rw.apptag = 0; cmnd->rw.appmask = 0; if (ns->ms) { /* * If formated with metadata, the block layer always provides a * metadata buffer if CONFIG_BLK_DEV_INTEGRITY is enabled. Else * we enable the PRACT bit for protection information or set the * namespace capacity to zero to prevent any I/O. */ if (!blk_integrity_rq(req)) { if (WARN_ON_ONCE(!nvme_ns_has_pi(ns))) return BLK_STS_NOTSUPP; control |= NVME_RW_PRINFO_PRACT; } switch (ns->pi_type) { case NVME_NS_DPS_PI_TYPE3: control |= NVME_RW_PRINFO_PRCHK_GUARD; break; case NVME_NS_DPS_PI_TYPE1: case NVME_NS_DPS_PI_TYPE2: control |= NVME_RW_PRINFO_PRCHK_GUARD | NVME_RW_PRINFO_PRCHK_REF; if (op == nvme_cmd_zone_append) control |= NVME_RW_APPEND_PIREMAP; nvme_set_ref_tag(ns, cmnd, req); break; } } cmnd->rw.control = cpu_to_le16(control); cmnd->rw.dsmgmt = cpu_to_le32(dsmgmt); return 0; } void nvme_cleanup_cmd(struct request *req) { if (req->rq_flags & RQF_SPECIAL_PAYLOAD) { struct nvme_ctrl *ctrl = nvme_req(req)->ctrl; if (req->special_vec.bv_page == ctrl->discard_page) clear_bit_unlock(0, &ctrl->discard_page_busy); else kfree(bvec_virt(&req->special_vec)); } } EXPORT_SYMBOL_GPL(nvme_cleanup_cmd); blk_status_t nvme_setup_cmd(struct nvme_ns *ns, struct request *req) { struct nvme_command *cmd = nvme_req(req)->cmd; blk_status_t ret = BLK_STS_OK; if (!(req->rq_flags & RQF_DONTPREP)) nvme_clear_nvme_request(req); switch (req_op(req)) { case REQ_OP_DRV_IN: case REQ_OP_DRV_OUT: /* these are setup prior to execution in nvme_init_request() */ break; case REQ_OP_FLUSH: nvme_setup_flush(ns, cmd); break; case REQ_OP_ZONE_RESET_ALL: case REQ_OP_ZONE_RESET: ret = nvme_setup_zone_mgmt_send(ns, req, cmd, NVME_ZONE_RESET); break; case REQ_OP_ZONE_OPEN: ret = nvme_setup_zone_mgmt_send(ns, req, cmd, NVME_ZONE_OPEN); break; case REQ_OP_ZONE_CLOSE: ret = nvme_setup_zone_mgmt_send(ns, req, cmd, NVME_ZONE_CLOSE); break; case REQ_OP_ZONE_FINISH: ret = nvme_setup_zone_mgmt_send(ns, req, cmd, NVME_ZONE_FINISH); break; case REQ_OP_WRITE_ZEROES: ret = nvme_setup_write_zeroes(ns, req, cmd); break; case REQ_OP_DISCARD: ret = nvme_setup_discard(ns, req, cmd); break; case REQ_OP_READ: ret = nvme_setup_rw(ns, req, cmd, nvme_cmd_read); break; case REQ_OP_WRITE: ret = nvme_setup_rw(ns, req, cmd, nvme_cmd_write); break; case REQ_OP_ZONE_APPEND: ret = nvme_setup_rw(ns, req, cmd, nvme_cmd_zone_append); break; default: WARN_ON_ONCE(1); return BLK_STS_IOERR; } cmd->common.command_id = nvme_cid(req); 
trace_nvme_setup_cmd(req, cmd); return ret; } EXPORT_SYMBOL_GPL(nvme_setup_cmd); /* * Return values: * 0: success * >0: nvme controller's cqe status response * <0: kernel error in lieu of controller response */ int nvme_execute_rq(struct request *rq, bool at_head) { blk_status_t status; status = blk_execute_rq(rq, at_head); if (nvme_req(rq)->flags & NVME_REQ_CANCELLED) return -EINTR; if (nvme_req(rq)->status) return nvme_req(rq)->status; return blk_status_to_errno(status); } EXPORT_SYMBOL_NS_GPL(nvme_execute_rq, NVME_TARGET_PASSTHRU); /* * Returns 0 on success. If the result is negative, it's a Linux error code; * if the result is positive, it's an NVM Express status code */ int __nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd, union nvme_result *result, void *buffer, unsigned bufflen, int qid, int at_head, blk_mq_req_flags_t flags) { struct request *req; int ret; if (qid == NVME_QID_ANY) req = blk_mq_alloc_request(q, nvme_req_op(cmd), flags); else req = blk_mq_alloc_request_hctx(q, nvme_req_op(cmd), flags, qid - 1); if (IS_ERR(req)) return PTR_ERR(req); nvme_init_request(req, cmd); if (buffer && bufflen) { ret = blk_rq_map_kern(q, req, buffer, bufflen, GFP_KERNEL); if (ret) goto out; } ret = nvme_execute_rq(req, at_head); if (result && ret >= 0) *result = nvme_req(req)->result; out: blk_mq_free_request(req); return ret; } EXPORT_SYMBOL_GPL(__nvme_submit_sync_cmd); int nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd, void *buffer, unsigned bufflen) { return __nvme_submit_sync_cmd(q, cmd, NULL, buffer, bufflen, NVME_QID_ANY, 0, 0); } EXPORT_SYMBOL_GPL(nvme_submit_sync_cmd); u32 nvme_command_effects(struct nvme_ctrl *ctrl, struct nvme_ns *ns, u8 opcode) { u32 effects = 0; if (ns) { effects = le32_to_cpu(ns->head->effects->iocs[opcode]); if (effects & ~(NVME_CMD_EFFECTS_CSUPP | NVME_CMD_EFFECTS_LBCC)) dev_warn_once(ctrl->device, "IO command:%02x has unusual effects:%08x\n", opcode, effects); /* * NVME_CMD_EFFECTS_CSE_MASK causes a freeze all I/O queues, * which would deadlock when done on an I/O command. Note that * We already warn about an unusual effect above. */ effects &= ~NVME_CMD_EFFECTS_CSE_MASK; } else { effects = le32_to_cpu(ctrl->effects->acs[opcode]); } return effects; } EXPORT_SYMBOL_NS_GPL(nvme_command_effects, NVME_TARGET_PASSTHRU); u32 nvme_passthru_start(struct nvme_ctrl *ctrl, struct nvme_ns *ns, u8 opcode) { u32 effects = nvme_command_effects(ctrl, ns, opcode); /* * For simplicity, IO to all namespaces is quiesced even if the command * effects say only one namespace is affected. 
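 *
 * Illustrative bracket for a caller (sketch only; the surrounding
 * request allocation and mapping are omitted and assumed):
 *
 *	effects = nvme_passthru_start(ctrl, ns, cmd.common.opcode);
 *	status = nvme_execute_rq(rq, false);
 *	nvme_passthru_end(ctrl, ns, effects, &cmd, status);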
*/ if (effects & NVME_CMD_EFFECTS_CSE_MASK) { mutex_lock(&ctrl->scan_lock); mutex_lock(&ctrl->subsys->lock); nvme_mpath_start_freeze(ctrl->subsys); nvme_mpath_wait_freeze(ctrl->subsys); nvme_start_freeze(ctrl); nvme_wait_freeze(ctrl); } return effects; } EXPORT_SYMBOL_NS_GPL(nvme_passthru_start, NVME_TARGET_PASSTHRU); void nvme_passthru_end(struct nvme_ctrl *ctrl, struct nvme_ns *ns, u32 effects, struct nvme_command *cmd, int status) { if (effects & NVME_CMD_EFFECTS_CSE_MASK) { nvme_unfreeze(ctrl); nvme_mpath_unfreeze(ctrl->subsys); mutex_unlock(&ctrl->subsys->lock); mutex_unlock(&ctrl->scan_lock); } if (effects & NVME_CMD_EFFECTS_CCC) { if (!test_and_set_bit(NVME_CTRL_DIRTY_CAPABILITY, &ctrl->flags)) { dev_info(ctrl->device, "controller capabilities changed, reset may be required to take effect.\n"); } } if (effects & (NVME_CMD_EFFECTS_NIC | NVME_CMD_EFFECTS_NCC)) { nvme_queue_scan(ctrl); flush_work(&ctrl->scan_work); } if (ns) return; switch (cmd->common.opcode) { case nvme_admin_set_features: switch (le32_to_cpu(cmd->common.cdw10) & 0xFF) { case NVME_FEAT_KATO: /* * Keep alive commands interval on the host should be * updated when KATO is modified by Set Features * commands. */ if (!status) nvme_update_keep_alive(ctrl, cmd); break; default: break; } break; default: break; } } EXPORT_SYMBOL_NS_GPL(nvme_passthru_end, NVME_TARGET_PASSTHRU); /* * Recommended frequency for KATO commands per NVMe 1.4 section 7.12.1: * * The host should send Keep Alive commands at half of the Keep Alive Timeout * accounting for transport roundtrip times [..]. */ static unsigned long nvme_keep_alive_work_period(struct nvme_ctrl *ctrl) { unsigned long delay = ctrl->kato * HZ / 2; /* * When using Traffic Based Keep Alive, we need to run * nvme_keep_alive_work at twice the normal frequency, as one * command completion can postpone sending a keep alive command * by up to twice the delay between runs. */ if (ctrl->ctratt & NVME_CTRL_ATTR_TBKAS) delay /= 2; return delay; } static void nvme_queue_keep_alive_work(struct nvme_ctrl *ctrl) { queue_delayed_work(nvme_wq, &ctrl->ka_work, nvme_keep_alive_work_period(ctrl)); } static enum rq_end_io_ret nvme_keep_alive_end_io(struct request *rq, blk_status_t status) { struct nvme_ctrl *ctrl = rq->end_io_data; unsigned long flags; bool startka = false; unsigned long rtt = jiffies - (rq->deadline - rq->timeout); unsigned long delay = nvme_keep_alive_work_period(ctrl); /* * Subtract off the keepalive RTT so nvme_keep_alive_work runs * at the desired frequency. 
*/ if (rtt <= delay) { delay -= rtt; } else { dev_warn(ctrl->device, "long keepalive RTT (%u ms)\n", jiffies_to_msecs(rtt)); delay = 0; } blk_mq_free_request(rq); if (status) { dev_err(ctrl->device, "failed nvme_keep_alive_end_io error=%d\n", status); return RQ_END_IO_NONE; } ctrl->ka_last_check_time = jiffies; ctrl->comp_seen = false; spin_lock_irqsave(&ctrl->lock, flags); if (ctrl->state == NVME_CTRL_LIVE || ctrl->state == NVME_CTRL_CONNECTING) startka = true; spin_unlock_irqrestore(&ctrl->lock, flags); if (startka) queue_delayed_work(nvme_wq, &ctrl->ka_work, delay); return RQ_END_IO_NONE; } static void nvme_keep_alive_work(struct work_struct *work) { struct nvme_ctrl *ctrl = container_of(to_delayed_work(work), struct nvme_ctrl, ka_work); bool comp_seen = ctrl->comp_seen; struct request *rq; ctrl->ka_last_check_time = jiffies; if ((ctrl->ctratt & NVME_CTRL_ATTR_TBKAS) && comp_seen) { dev_dbg(ctrl->device, "reschedule traffic based keep-alive timer\n"); ctrl->comp_seen = false; nvme_queue_keep_alive_work(ctrl); return; } rq = blk_mq_alloc_request(ctrl->admin_q, nvme_req_op(&ctrl->ka_cmd), BLK_MQ_REQ_RESERVED | BLK_MQ_REQ_NOWAIT); if (IS_ERR(rq)) { /* allocation failure, reset the controller */ dev_err(ctrl->device, "keep-alive failed: %ld\n", PTR_ERR(rq)); nvme_reset_ctrl(ctrl); return; } nvme_init_request(rq, &ctrl->ka_cmd); rq->timeout = ctrl->kato * HZ; rq->end_io = nvme_keep_alive_end_io; rq->end_io_data = ctrl; blk_execute_rq_nowait(rq, false); } static void nvme_start_keep_alive(struct nvme_ctrl *ctrl) { if (unlikely(ctrl->kato == 0)) return; nvme_queue_keep_alive_work(ctrl); } void nvme_stop_keep_alive(struct nvme_ctrl *ctrl) { if (unlikely(ctrl->kato == 0)) return; cancel_delayed_work_sync(&ctrl->ka_work); } EXPORT_SYMBOL_GPL(nvme_stop_keep_alive); static void nvme_update_keep_alive(struct nvme_ctrl *ctrl, struct nvme_command *cmd) { unsigned int new_kato = DIV_ROUND_UP(le32_to_cpu(cmd->common.cdw11), 1000); dev_info(ctrl->device, "keep alive interval updated from %u ms to %u ms\n", ctrl->kato * 1000 / 2, new_kato * 1000 / 2); nvme_stop_keep_alive(ctrl); ctrl->kato = new_kato; nvme_start_keep_alive(ctrl); } /* * In NVMe 1.0 the CNS field was just a binary controller or namespace * flag, thus sending any new CNS opcodes has a big chance of not working. * Qemu unfortunately had that bug after reporting a 1.1 version compliance * (but not for any later version). 
*/ static bool nvme_ctrl_limited_cns(struct nvme_ctrl *ctrl) { if (ctrl->quirks & NVME_QUIRK_IDENTIFY_CNS) return ctrl->vs < NVME_VS(1, 2, 0); return ctrl->vs < NVME_VS(1, 1, 0); } static int nvme_identify_ctrl(struct nvme_ctrl *dev, struct nvme_id_ctrl **id) { struct nvme_command c = { }; int error; /* gcc-4.4.4 (at least) has issues with initializers and anon unions */ c.identify.opcode = nvme_admin_identify; c.identify.cns = NVME_ID_CNS_CTRL; *id = kmalloc(sizeof(struct nvme_id_ctrl), GFP_KERNEL); if (!*id) return -ENOMEM; error = nvme_submit_sync_cmd(dev->admin_q, &c, *id, sizeof(struct nvme_id_ctrl)); if (error) kfree(*id); return error; } static int nvme_process_ns_desc(struct nvme_ctrl *ctrl, struct nvme_ns_ids *ids, struct nvme_ns_id_desc *cur, bool *csi_seen) { const char *warn_str = "ctrl returned bogus length:"; void *data = cur; switch (cur->nidt) { case NVME_NIDT_EUI64: if (cur->nidl != NVME_NIDT_EUI64_LEN) { dev_warn(ctrl->device, "%s %d for NVME_NIDT_EUI64\n", warn_str, cur->nidl); return -1; } if (ctrl->quirks & NVME_QUIRK_BOGUS_NID) return NVME_NIDT_EUI64_LEN; memcpy(ids->eui64, data + sizeof(*cur), NVME_NIDT_EUI64_LEN); return NVME_NIDT_EUI64_LEN; case NVME_NIDT_NGUID: if (cur->nidl != NVME_NIDT_NGUID_LEN) { dev_warn(ctrl->device, "%s %d for NVME_NIDT_NGUID\n", warn_str, cur->nidl); return -1; } if (ctrl->quirks & NVME_QUIRK_BOGUS_NID) return NVME_NIDT_NGUID_LEN; memcpy(ids->nguid, data + sizeof(*cur), NVME_NIDT_NGUID_LEN); return NVME_NIDT_NGUID_LEN; case NVME_NIDT_UUID: if (cur->nidl != NVME_NIDT_UUID_LEN) { dev_warn(ctrl->device, "%s %d for NVME_NIDT_UUID\n", warn_str, cur->nidl); return -1; } if (ctrl->quirks & NVME_QUIRK_BOGUS_NID) return NVME_NIDT_UUID_LEN; uuid_copy(&ids->uuid, data + sizeof(*cur)); return NVME_NIDT_UUID_LEN; case NVME_NIDT_CSI: if (cur->nidl != NVME_NIDT_CSI_LEN) { dev_warn(ctrl->device, "%s %d for NVME_NIDT_CSI\n", warn_str, cur->nidl); return -1; } memcpy(&ids->csi, data + sizeof(*cur), NVME_NIDT_CSI_LEN); *csi_seen = true; return NVME_NIDT_CSI_LEN; default: /* Skip unknown types */ return cur->nidl; } } static int nvme_identify_ns_descs(struct nvme_ctrl *ctrl, struct nvme_ns_info *info) { struct nvme_command c = { }; bool csi_seen = false; int status, pos, len; void *data; if (ctrl->vs < NVME_VS(1, 3, 0) && !nvme_multi_css(ctrl)) return 0; if (ctrl->quirks & NVME_QUIRK_NO_NS_DESC_LIST) return 0; c.identify.opcode = nvme_admin_identify; c.identify.nsid = cpu_to_le32(info->nsid); c.identify.cns = NVME_ID_CNS_NS_DESC_LIST; data = kzalloc(NVME_IDENTIFY_DATA_SIZE, GFP_KERNEL); if (!data) return -ENOMEM; status = nvme_submit_sync_cmd(ctrl->admin_q, &c, data, NVME_IDENTIFY_DATA_SIZE); if (status) { dev_warn(ctrl->device, "Identify Descriptors failed (nsid=%u, status=0x%x)\n", info->nsid, status); goto free_data; } for (pos = 0; pos < NVME_IDENTIFY_DATA_SIZE; pos += len) { struct nvme_ns_id_desc *cur = data + pos; if (cur->nidl == 0) break; len = nvme_process_ns_desc(ctrl, &info->ids, cur, &csi_seen); if (len < 0) break; len += sizeof(*cur); } if (nvme_multi_css(ctrl) && !csi_seen) { dev_warn(ctrl->device, "Command set not reported for nsid:%d\n", info->nsid); status = -EINVAL; } free_data: kfree(data); return status; } static int nvme_identify_ns(struct nvme_ctrl *ctrl, unsigned nsid, struct nvme_id_ns **id) { struct nvme_command c = { }; int error; /* gcc-4.4.4 (at least) has issues with initializers and anon unions */ c.identify.opcode = nvme_admin_identify; c.identify.nsid = cpu_to_le32(nsid); c.identify.cns = NVME_ID_CNS_NS; *id = 
kmalloc(sizeof(**id), GFP_KERNEL); if (!*id) return -ENOMEM; error = nvme_submit_sync_cmd(ctrl->admin_q, &c, *id, sizeof(**id)); if (error) { dev_warn(ctrl->device, "Identify namespace failed (%d)\n", error); kfree(*id); } return error; } static int nvme_ns_info_from_identify(struct nvme_ctrl *ctrl, struct nvme_ns_info *info) { struct nvme_ns_ids *ids = &info->ids; struct nvme_id_ns *id; int ret; ret = nvme_identify_ns(ctrl, info->nsid, &id); if (ret) return ret; if (id->ncap == 0) { /* namespace not allocated or attached */ info->is_removed = true; return -ENODEV; } info->anagrpid = id->anagrpid; info->is_shared = id->nmic & NVME_NS_NMIC_SHARED; info->is_readonly = id->nsattr & NVME_NS_ATTR_RO; info->is_ready = true; if (ctrl->quirks & NVME_QUIRK_BOGUS_NID) { dev_info(ctrl->device, "Ignoring bogus Namespace Identifiers\n"); } else { if (ctrl->vs >= NVME_VS(1, 1, 0) && !memchr_inv(ids->eui64, 0, sizeof(ids->eui64))) memcpy(ids->eui64, id->eui64, sizeof(ids->eui64)); if (ctrl->vs >= NVME_VS(1, 2, 0) && !memchr_inv(ids->nguid, 0, sizeof(ids->nguid))) memcpy(ids->nguid, id->nguid, sizeof(ids->nguid)); } kfree(id); return 0; } static int nvme_ns_info_from_id_cs_indep(struct nvme_ctrl *ctrl, struct nvme_ns_info *info) { struct nvme_id_ns_cs_indep *id; struct nvme_command c = { .identify.opcode = nvme_admin_identify, .identify.nsid = cpu_to_le32(info->nsid), .identify.cns = NVME_ID_CNS_NS_CS_INDEP, }; int ret; id = kmalloc(sizeof(*id), GFP_KERNEL); if (!id) return -ENOMEM; ret = nvme_submit_sync_cmd(ctrl->admin_q, &c, id, sizeof(*id)); if (!ret) { info->anagrpid = id->anagrpid; info->is_shared = id->nmic & NVME_NS_NMIC_SHARED; info->is_readonly = id->nsattr & NVME_NS_ATTR_RO; info->is_ready = id->nstat & NVME_NSTAT_NRDY; } kfree(id); return ret; } static int nvme_features(struct nvme_ctrl *dev, u8 op, unsigned int fid, unsigned int dword11, void *buffer, size_t buflen, u32 *result) { union nvme_result res = { 0 }; struct nvme_command c = { }; int ret; c.features.opcode = op; c.features.fid = cpu_to_le32(fid); c.features.dword11 = cpu_to_le32(dword11); ret = __nvme_submit_sync_cmd(dev->admin_q, &c, &res, buffer, buflen, NVME_QID_ANY, 0, 0); if (ret >= 0 && result) *result = le32_to_cpu(res.u32); return ret; } int nvme_set_features(struct nvme_ctrl *dev, unsigned int fid, unsigned int dword11, void *buffer, size_t buflen, u32 *result) { return nvme_features(dev, nvme_admin_set_features, fid, dword11, buffer, buflen, result); } EXPORT_SYMBOL_GPL(nvme_set_features); int nvme_get_features(struct nvme_ctrl *dev, unsigned int fid, unsigned int dword11, void *buffer, size_t buflen, u32 *result) { return nvme_features(dev, nvme_admin_get_features, fid, dword11, buffer, buflen, result); } EXPORT_SYMBOL_GPL(nvme_get_features); int nvme_set_queue_count(struct nvme_ctrl *ctrl, int *count) { u32 q_count = (*count - 1) | ((*count - 1) << 16); u32 result; int status, nr_io_queues; status = nvme_set_features(ctrl, NVME_FEAT_NUM_QUEUES, q_count, NULL, 0, &result); if (status < 0) return status; /* * Degraded controllers might return an error when setting the queue * count. We still want to be able to bring them online and offer * access to the admin queue, as that might be only way to fix them up. 
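 *
 * Worked example (illustrative numbers): a request for *count = 8 I/O
 * queues is encoded as cdw11 = 7 | (7 << 16), since NSQR/NCQR are
 * zero-based. If the controller reports NSQA = 3 and NCQA = 5 in the
 * result, min(3, 5) + 1 = 4 queues are usable and *count becomes
 * min(8, 4) = 4.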
*/ if (status > 0) { dev_err(ctrl->device, "Could not set queue count (%d)\n", status); *count = 0; } else { nr_io_queues = min(result & 0xffff, result >> 16) + 1; *count = min(*count, nr_io_queues); } return 0; } EXPORT_SYMBOL_GPL(nvme_set_queue_count); #define NVME_AEN_SUPPORTED \ (NVME_AEN_CFG_NS_ATTR | NVME_AEN_CFG_FW_ACT | \ NVME_AEN_CFG_ANA_CHANGE | NVME_AEN_CFG_DISC_CHANGE) static void nvme_enable_aen(struct nvme_ctrl *ctrl) { u32 result, supported_aens = ctrl->oaes & NVME_AEN_SUPPORTED; int status; if (!supported_aens) return; status = nvme_set_features(ctrl, NVME_FEAT_ASYNC_EVENT, supported_aens, NULL, 0, &result); if (status) dev_warn(ctrl->device, "Failed to configure AEN (cfg %x)\n", supported_aens); queue_work(nvme_wq, &ctrl->async_event_work); } static int nvme_ns_open(struct nvme_ns *ns) { /* should never be called due to GENHD_FL_HIDDEN */ if (WARN_ON_ONCE(nvme_ns_head_multipath(ns->head))) goto fail; if (!nvme_get_ns(ns)) goto fail; if (!try_module_get(ns->ctrl->ops->module)) goto fail_put_ns; return 0; fail_put_ns: nvme_put_ns(ns); fail: return -ENXIO; } static void nvme_ns_release(struct nvme_ns *ns) { module_put(ns->ctrl->ops->module); nvme_put_ns(ns); } static int nvme_open(struct gendisk *disk, blk_mode_t mode) { return nvme_ns_open(disk->private_data); } static void nvme_release(struct gendisk *disk) { nvme_ns_release(disk->private_data); } int nvme_getgeo(struct block_device *bdev, struct hd_geometry *geo) { /* some standard values */ geo->heads = 1 << 6; geo->sectors = 1 << 5; geo->cylinders = get_capacity(bdev->bd_disk) >> 11; return 0; } #ifdef CONFIG_BLK_DEV_INTEGRITY static void nvme_init_integrity(struct gendisk *disk, struct nvme_ns *ns, u32 max_integrity_segments) { struct blk_integrity integrity = { }; switch (ns->pi_type) { case NVME_NS_DPS_PI_TYPE3: switch (ns->guard_type) { case NVME_NVM_NS_16B_GUARD: integrity.profile = &t10_pi_type3_crc; integrity.tag_size = sizeof(u16) + sizeof(u32); integrity.flags |= BLK_INTEGRITY_DEVICE_CAPABLE; break; case NVME_NVM_NS_64B_GUARD: integrity.profile = &ext_pi_type3_crc64; integrity.tag_size = sizeof(u16) + 6; integrity.flags |= BLK_INTEGRITY_DEVICE_CAPABLE; break; default: integrity.profile = NULL; break; } break; case NVME_NS_DPS_PI_TYPE1: case NVME_NS_DPS_PI_TYPE2: switch (ns->guard_type) { case NVME_NVM_NS_16B_GUARD: integrity.profile = &t10_pi_type1_crc; integrity.tag_size = sizeof(u16); integrity.flags |= BLK_INTEGRITY_DEVICE_CAPABLE; break; case NVME_NVM_NS_64B_GUARD: integrity.profile = &ext_pi_type1_crc64; integrity.tag_size = sizeof(u16); integrity.flags |= BLK_INTEGRITY_DEVICE_CAPABLE; break; default: integrity.profile = NULL; break; } break; default: integrity.profile = NULL; break; } integrity.tuple_size = ns->ms; blk_integrity_register(disk, &integrity); blk_queue_max_integrity_segments(disk->queue, max_integrity_segments); } #else static void nvme_init_integrity(struct gendisk *disk, struct nvme_ns *ns, u32 max_integrity_segments) { } #endif /* CONFIG_BLK_DEV_INTEGRITY */ static void nvme_config_discard(struct gendisk *disk, struct nvme_ns *ns) { struct nvme_ctrl *ctrl = ns->ctrl; struct request_queue *queue = disk->queue; u32 size = queue_logical_block_size(queue); if (ctrl->dmrsl && ctrl->dmrsl <= nvme_sect_to_lba(ns, UINT_MAX)) ctrl->max_discard_sectors = nvme_lba_to_sect(ns, ctrl->dmrsl); if (ctrl->max_discard_sectors == 0) { blk_queue_max_discard_sectors(queue, 0); return; } BUILD_BUG_ON(PAGE_SIZE / sizeof(struct nvme_dsm_range) < NVME_DSM_MAX_RANGES); queue->limits.discard_granularity = size; /* 
If discard is already enabled, don't reset queue limits */ if (queue->limits.max_discard_sectors) return; blk_queue_max_discard_sectors(queue, ctrl->max_discard_sectors); blk_queue_max_discard_segments(queue, ctrl->max_discard_segments); if (ctrl->quirks & NVME_QUIRK_DEALLOCATE_ZEROES) blk_queue_max_write_zeroes_sectors(queue, UINT_MAX); } static bool nvme_ns_ids_equal(struct nvme_ns_ids *a, struct nvme_ns_ids *b) { return uuid_equal(&a->uuid, &b->uuid) && memcmp(&a->nguid, &b->nguid, sizeof(a->nguid)) == 0 && memcmp(&a->eui64, &b->eui64, sizeof(a->eui64)) == 0 && a->csi == b->csi; } static int nvme_init_ms(struct nvme_ns *ns, struct nvme_id_ns *id) { bool first = id->dps & NVME_NS_DPS_PI_FIRST; unsigned lbaf = nvme_lbaf_index(id->flbas); struct nvme_ctrl *ctrl = ns->ctrl; struct nvme_command c = { }; struct nvme_id_ns_nvm *nvm; int ret = 0; u32 elbaf; ns->pi_size = 0; ns->ms = le16_to_cpu(id->lbaf[lbaf].ms); if (!(ctrl->ctratt & NVME_CTRL_ATTR_ELBAS)) { ns->pi_size = sizeof(struct t10_pi_tuple); ns->guard_type = NVME_NVM_NS_16B_GUARD; goto set_pi; } nvm = kzalloc(sizeof(*nvm), GFP_KERNEL); if (!nvm) return -ENOMEM; c.identify.opcode = nvme_admin_identify; c.identify.nsid = cpu_to_le32(ns->head->ns_id); c.identify.cns = NVME_ID_CNS_CS_NS; c.identify.csi = NVME_CSI_NVM; ret = nvme_submit_sync_cmd(ns->ctrl->admin_q, &c, nvm, sizeof(*nvm)); if (ret) goto free_data; elbaf = le32_to_cpu(nvm->elbaf[lbaf]); /* no support for storage tag formats right now */ if (nvme_elbaf_sts(elbaf)) goto free_data; ns->guard_type = nvme_elbaf_guard_type(elbaf); switch (ns->guard_type) { case NVME_NVM_NS_64B_GUARD: ns->pi_size = sizeof(struct crc64_pi_tuple); break; case NVME_NVM_NS_16B_GUARD: ns->pi_size = sizeof(struct t10_pi_tuple); break; default: break; } free_data: kfree(nvm); set_pi: if (ns->pi_size && (first || ns->ms == ns->pi_size)) ns->pi_type = id->dps & NVME_NS_DPS_PI_MASK; else ns->pi_type = 0; return ret; } static void nvme_configure_metadata(struct nvme_ns *ns, struct nvme_id_ns *id) { struct nvme_ctrl *ctrl = ns->ctrl; if (nvme_init_ms(ns, id)) return; ns->features &= ~(NVME_NS_METADATA_SUPPORTED | NVME_NS_EXT_LBAS); if (!ns->ms || !(ctrl->ops->flags & NVME_F_METADATA_SUPPORTED)) return; if (ctrl->ops->flags & NVME_F_FABRICS) { /* * The NVMe over Fabrics specification only supports metadata as * part of the extended data LBA. We rely on HCA/HBA support to * remap the separate metadata buffer from the block layer. */ if (WARN_ON_ONCE(!(id->flbas & NVME_NS_FLBAS_META_EXT))) return; ns->features |= NVME_NS_EXT_LBAS; /* * The current fabrics transport drivers support namespace * metadata formats only if nvme_ns_has_pi() returns true. * Suppress support for all other formats so the namespace will * have a 0 capacity and not be usable through the block stack. * * Note, this check will need to be modified if any drivers * gain the ability to use other metadata formats. */ if (ctrl->max_integrity_segments && nvme_ns_has_pi(ns)) ns->features |= NVME_NS_METADATA_SUPPORTED; } else { /* * For PCIe controllers, we can't easily remap the separate * metadata buffer from the block layer and thus require a * separate metadata buffer for block layer metadata/PI support. * We allow extended LBAs for the passthrough interface, though. 
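 *
 * Illustration (example geometry, not taken from this file): with
 * 4096-byte data blocks and ms = 8 bytes of metadata, an extended-LBA
 * format (NVME_NS_FLBAS_META_EXT set) transfers 4104 bytes per block
 * with the metadata inline, while the separate-buffer layout keeps
 * 4096-byte blocks and carries the 8 bytes per block in a distinct
 * metadata buffer.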
*/ if (id->flbas & NVME_NS_FLBAS_META_EXT) ns->features |= NVME_NS_EXT_LBAS; else ns->features |= NVME_NS_METADATA_SUPPORTED; } } static void nvme_set_queue_limits(struct nvme_ctrl *ctrl, struct request_queue *q) { bool vwc = ctrl->vwc & NVME_CTRL_VWC_PRESENT; if (ctrl->max_hw_sectors) { u32 max_segments = (ctrl->max_hw_sectors / (NVME_CTRL_PAGE_SIZE >> 9)) + 1; max_segments = min_not_zero(max_segments, ctrl->max_segments); blk_queue_max_hw_sectors(q, ctrl->max_hw_sectors); blk_queue_max_segments(q, min_t(u32, max_segments, USHRT_MAX)); } blk_queue_virt_boundary(q, NVME_CTRL_PAGE_SIZE - 1); blk_queue_dma_alignment(q, 3); blk_queue_write_cache(q, vwc, vwc); } static void nvme_update_disk_info(struct gendisk *disk, struct nvme_ns *ns, struct nvme_id_ns *id) { sector_t capacity = nvme_lba_to_sect(ns, le64_to_cpu(id->nsze)); u32 bs = 1U << ns->lba_shift; u32 atomic_bs, phys_bs, io_opt = 0; /* * The block layer can't support LBA sizes larger than the page size * yet, so catch this early and don't allow block I/O. */ if (ns->lba_shift > PAGE_SHIFT) { capacity = 0; bs = (1 << 9); } blk_integrity_unregister(disk); atomic_bs = phys_bs = bs; if (id->nabo == 0) { /* * Bit 1 indicates whether NAWUPF is defined for this namespace * and whether it should be used instead of AWUPF. If NAWUPF == * 0 then AWUPF must be used instead. */ if (id->nsfeat & NVME_NS_FEAT_ATOMICS && id->nawupf) atomic_bs = (1 + le16_to_cpu(id->nawupf)) * bs; else atomic_bs = (1 + ns->ctrl->subsys->awupf) * bs; } if (id->nsfeat & NVME_NS_FEAT_IO_OPT) { /* NPWG = Namespace Preferred Write Granularity */ phys_bs = bs * (1 + le16_to_cpu(id->npwg)); /* NOWS = Namespace Optimal Write Size */ io_opt = bs * (1 + le16_to_cpu(id->nows)); } blk_queue_logical_block_size(disk->queue, bs); /* * Linux filesystems assume writing a single physical block is * an atomic operation. Hence limit the physical block size to the * value of the Atomic Write Unit Power Fail parameter. */ blk_queue_physical_block_size(disk->queue, min(phys_bs, atomic_bs)); blk_queue_io_min(disk->queue, phys_bs); blk_queue_io_opt(disk->queue, io_opt); /* * Register a metadata profile for PI, or the plain non-integrity NVMe * metadata masquerading as Type 0 if supported, otherwise reject block * I/O to namespaces with metadata except when the namespace supports * PI, as it can strip/insert in that case. 
*/ if (ns->ms) { if (IS_ENABLED(CONFIG_BLK_DEV_INTEGRITY) && (ns->features & NVME_NS_METADATA_SUPPORTED)) nvme_init_integrity(disk, ns, ns->ctrl->max_integrity_segments); else if (!nvme_ns_has_pi(ns)) capacity = 0; } set_capacity_and_notify(disk, capacity); nvme_config_discard(disk, ns); blk_queue_max_write_zeroes_sectors(disk->queue, ns->ctrl->max_zeroes_sectors); } static bool nvme_ns_is_readonly(struct nvme_ns *ns, struct nvme_ns_info *info) { return info->is_readonly || test_bit(NVME_NS_FORCE_RO, &ns->flags); } static inline bool nvme_first_scan(struct gendisk *disk) { /* nvme_alloc_ns() scans the disk prior to adding it */ return !disk_live(disk); } static void nvme_set_chunk_sectors(struct nvme_ns *ns, struct nvme_id_ns *id) { struct nvme_ctrl *ctrl = ns->ctrl; u32 iob; if ((ctrl->quirks & NVME_QUIRK_STRIPE_SIZE) && is_power_of_2(ctrl->max_hw_sectors)) iob = ctrl->max_hw_sectors; else iob = nvme_lba_to_sect(ns, le16_to_cpu(id->noiob)); if (!iob) return; if (!is_power_of_2(iob)) { if (nvme_first_scan(ns->disk)) pr_warn("%s: ignoring unaligned IO boundary:%u\n", ns->disk->disk_name, iob); return; } if (blk_queue_is_zoned(ns->disk->queue)) { if (nvme_first_scan(ns->disk)) pr_warn("%s: ignoring zoned namespace IO boundary\n", ns->disk->disk_name); return; } blk_queue_chunk_sectors(ns->queue, iob); } static int nvme_update_ns_info_generic(struct nvme_ns *ns, struct nvme_ns_info *info) { blk_mq_freeze_queue(ns->disk->queue); nvme_set_queue_limits(ns->ctrl, ns->queue); set_disk_ro(ns->disk, nvme_ns_is_readonly(ns, info)); blk_mq_unfreeze_queue(ns->disk->queue); if (nvme_ns_head_multipath(ns->head)) { blk_mq_freeze_queue(ns->head->disk->queue); set_disk_ro(ns->head->disk, nvme_ns_is_readonly(ns, info)); nvme_mpath_revalidate_paths(ns); blk_stack_limits(&ns->head->disk->queue->limits, &ns->queue->limits, 0); ns->head->disk->flags |= GENHD_FL_HIDDEN; blk_mq_unfreeze_queue(ns->head->disk->queue); } /* Hide the block-interface for these devices */ ns->disk->flags |= GENHD_FL_HIDDEN; set_bit(NVME_NS_READY, &ns->flags); return 0; } static int nvme_update_ns_info_block(struct nvme_ns *ns, struct nvme_ns_info *info) { struct nvme_id_ns *id; unsigned lbaf; int ret; ret = nvme_identify_ns(ns->ctrl, info->nsid, &id); if (ret) return ret; blk_mq_freeze_queue(ns->disk->queue); lbaf = nvme_lbaf_index(id->flbas); ns->lba_shift = id->lbaf[lbaf].ds; nvme_set_queue_limits(ns->ctrl, ns->queue); nvme_configure_metadata(ns, id); nvme_set_chunk_sectors(ns, id); nvme_update_disk_info(ns->disk, ns, id); if (ns->head->ids.csi == NVME_CSI_ZNS) { ret = nvme_update_zone_info(ns, lbaf); if (ret) { blk_mq_unfreeze_queue(ns->disk->queue); goto out; } } /* * Only set the DEAC bit if the device guarantees that reads from * deallocated data return zeroes. While the DEAC bit does not * require that, it must be a no-op if reads from deallocated data * do not return zeroes. 
*/ if ((id->dlfeat & 0x7) == 0x1 && (id->dlfeat & (1 << 3))) ns->features |= NVME_NS_DEAC; set_disk_ro(ns->disk, nvme_ns_is_readonly(ns, info)); set_bit(NVME_NS_READY, &ns->flags); blk_mq_unfreeze_queue(ns->disk->queue); if (blk_queue_is_zoned(ns->queue)) { ret = nvme_revalidate_zones(ns); if (ret && !nvme_first_scan(ns->disk)) goto out; } if (nvme_ns_head_multipath(ns->head)) { blk_mq_freeze_queue(ns->head->disk->queue); nvme_update_disk_info(ns->head->disk, ns, id); set_disk_ro(ns->head->disk, nvme_ns_is_readonly(ns, info)); nvme_mpath_revalidate_paths(ns); blk_stack_limits(&ns->head->disk->queue->limits, &ns->queue->limits, 0); disk_update_readahead(ns->head->disk); blk_mq_unfreeze_queue(ns->head->disk->queue); } ret = 0; out: /* * If probing fails due an unsupported feature, hide the block device, * but still allow other access. */ if (ret == -ENODEV) { ns->disk->flags |= GENHD_FL_HIDDEN; set_bit(NVME_NS_READY, &ns->flags); ret = 0; } kfree(id); return ret; } static int nvme_update_ns_info(struct nvme_ns *ns, struct nvme_ns_info *info) { switch (info->ids.csi) { case NVME_CSI_ZNS: if (!IS_ENABLED(CONFIG_BLK_DEV_ZONED)) { dev_info(ns->ctrl->device, "block device for nsid %u not supported without CONFIG_BLK_DEV_ZONED\n", info->nsid); return nvme_update_ns_info_generic(ns, info); } return nvme_update_ns_info_block(ns, info); case NVME_CSI_NVM: return nvme_update_ns_info_block(ns, info); default: dev_info(ns->ctrl->device, "block device for nsid %u not supported (csi %u)\n", info->nsid, info->ids.csi); return nvme_update_ns_info_generic(ns, info); } } #ifdef CONFIG_BLK_SED_OPAL static int nvme_sec_submit(void *data, u16 spsp, u8 secp, void *buffer, size_t len, bool send) { struct nvme_ctrl *ctrl = data; struct nvme_command cmd = { }; if (send) cmd.common.opcode = nvme_admin_security_send; else cmd.common.opcode = nvme_admin_security_recv; cmd.common.nsid = 0; cmd.common.cdw10 = cpu_to_le32(((u32)secp) << 24 | ((u32)spsp) << 8); cmd.common.cdw11 = cpu_to_le32(len); return __nvme_submit_sync_cmd(ctrl->admin_q, &cmd, NULL, buffer, len, NVME_QID_ANY, 1, 0); } static void nvme_configure_opal(struct nvme_ctrl *ctrl, bool was_suspended) { if (ctrl->oacs & NVME_CTRL_OACS_SEC_SUPP) { if (!ctrl->opal_dev) ctrl->opal_dev = init_opal_dev(ctrl, &nvme_sec_submit); else if (was_suspended) opal_unlock_from_suspend(ctrl->opal_dev); } else { free_opal_dev(ctrl->opal_dev); ctrl->opal_dev = NULL; } } #else static void nvme_configure_opal(struct nvme_ctrl *ctrl, bool was_suspended) { } #endif /* CONFIG_BLK_SED_OPAL */ #ifdef CONFIG_BLK_DEV_ZONED static int nvme_report_zones(struct gendisk *disk, sector_t sector, unsigned int nr_zones, report_zones_cb cb, void *data) { return nvme_ns_report_zones(disk->private_data, sector, nr_zones, cb, data); } #else #define nvme_report_zones NULL #endif /* CONFIG_BLK_DEV_ZONED */ const struct block_device_operations nvme_bdev_ops = { .owner = THIS_MODULE, .ioctl = nvme_ioctl, .compat_ioctl = blkdev_compat_ptr_ioctl, .open = nvme_open, .release = nvme_release, .getgeo = nvme_getgeo, .report_zones = nvme_report_zones, .pr_ops = &nvme_pr_ops, }; static int nvme_wait_ready(struct nvme_ctrl *ctrl, u32 mask, u32 val, u32 timeout, const char *op) { unsigned long timeout_jiffies = jiffies + timeout * HZ; u32 csts; int ret; while ((ret = ctrl->ops->reg_read32(ctrl, NVME_REG_CSTS, &csts)) == 0) { if (csts == ~0) return -ENODEV; if ((csts & mask) == val) break; usleep_range(1000, 2000); if (fatal_signal_pending(current)) return -EINTR; if (time_after(jiffies, timeout_jiffies)) { 
dev_err(ctrl->device, "Device not ready; aborting %s, CSTS=0x%x\n", op, csts); return -ENODEV; } } return ret; } int nvme_disable_ctrl(struct nvme_ctrl *ctrl, bool shutdown) { int ret; ctrl->ctrl_config &= ~NVME_CC_SHN_MASK; if (shutdown) ctrl->ctrl_config |= NVME_CC_SHN_NORMAL; else ctrl->ctrl_config &= ~NVME_CC_ENABLE; ret = ctrl->ops->reg_write32(ctrl, NVME_REG_CC, ctrl->ctrl_config); if (ret) return ret; if (shutdown) { return nvme_wait_ready(ctrl, NVME_CSTS_SHST_MASK, NVME_CSTS_SHST_CMPLT, ctrl->shutdown_timeout, "shutdown"); } if (ctrl->quirks & NVME_QUIRK_DELAY_BEFORE_CHK_RDY) msleep(NVME_QUIRK_DELAY_AMOUNT); return nvme_wait_ready(ctrl, NVME_CSTS_RDY, 0, (NVME_CAP_TIMEOUT(ctrl->cap) + 1) / 2, "reset"); } EXPORT_SYMBOL_GPL(nvme_disable_ctrl); int nvme_enable_ctrl(struct nvme_ctrl *ctrl) { unsigned dev_page_min; u32 timeout; int ret; ret = ctrl->ops->reg_read64(ctrl, NVME_REG_CAP, &ctrl->cap); if (ret) { dev_err(ctrl->device, "Reading CAP failed (%d)\n", ret); return ret; } dev_page_min = NVME_CAP_MPSMIN(ctrl->cap) + 12; if (NVME_CTRL_PAGE_SHIFT < dev_page_min) { dev_err(ctrl->device, "Minimum device page size %u too large for host (%u)\n", 1 << dev_page_min, 1 << NVME_CTRL_PAGE_SHIFT); return -ENODEV; } if (NVME_CAP_CSS(ctrl->cap) & NVME_CAP_CSS_CSI) ctrl->ctrl_config = NVME_CC_CSS_CSI; else ctrl->ctrl_config = NVME_CC_CSS_NVM; if (ctrl->cap & NVME_CAP_CRMS_CRWMS && ctrl->cap & NVME_CAP_CRMS_CRIMS) ctrl->ctrl_config |= NVME_CC_CRIME; ctrl->ctrl_config |= (NVME_CTRL_PAGE_SHIFT - 12) << NVME_CC_MPS_SHIFT; ctrl->ctrl_config |= NVME_CC_AMS_RR | NVME_CC_SHN_NONE; ctrl->ctrl_config |= NVME_CC_IOSQES | NVME_CC_IOCQES; ret = ctrl->ops->reg_write32(ctrl, NVME_REG_CC, ctrl->ctrl_config); if (ret) return ret; /* Flush write to device (required if transport is PCI) */ ret = ctrl->ops->reg_read32(ctrl, NVME_REG_CC, &ctrl->ctrl_config); if (ret) return ret; /* CAP value may change after initial CC write */ ret = ctrl->ops->reg_read64(ctrl, NVME_REG_CAP, &ctrl->cap); if (ret) return ret; timeout = NVME_CAP_TIMEOUT(ctrl->cap); if (ctrl->cap & NVME_CAP_CRMS_CRWMS) { u32 crto, ready_timeout; ret = ctrl->ops->reg_read32(ctrl, NVME_REG_CRTO, &crto); if (ret) { dev_err(ctrl->device, "Reading CRTO failed (%d)\n", ret); return ret; } /* * CRTO should always be greater or equal to CAP.TO, but some * devices are known to get this wrong. Use the larger of the * two values. 
*/ if (ctrl->ctrl_config & NVME_CC_CRIME) ready_timeout = NVME_CRTO_CRIMT(crto); else ready_timeout = NVME_CRTO_CRWMT(crto); if (ready_timeout < timeout) dev_warn_once(ctrl->device, "bad crto:%x cap:%llx\n", crto, ctrl->cap); else timeout = ready_timeout; } ctrl->ctrl_config |= NVME_CC_ENABLE; ret = ctrl->ops->reg_write32(ctrl, NVME_REG_CC, ctrl->ctrl_config); if (ret) return ret; return nvme_wait_ready(ctrl, NVME_CSTS_RDY, NVME_CSTS_RDY, (timeout + 1) / 2, "initialisation"); } EXPORT_SYMBOL_GPL(nvme_enable_ctrl); static int nvme_configure_timestamp(struct nvme_ctrl *ctrl) { __le64 ts; int ret; if (!(ctrl->oncs & NVME_CTRL_ONCS_TIMESTAMP)) return 0; ts = cpu_to_le64(ktime_to_ms(ktime_get_real())); ret = nvme_set_features(ctrl, NVME_FEAT_TIMESTAMP, 0, &ts, sizeof(ts), NULL); if (ret) dev_warn_once(ctrl->device, "could not set timestamp (%d)\n", ret); return ret; } static int nvme_configure_host_options(struct nvme_ctrl *ctrl) { struct nvme_feat_host_behavior *host; u8 acre = 0, lbafee = 0; int ret; /* Don't bother enabling the feature if retry delay is not reported */ if (ctrl->crdt[0]) acre = NVME_ENABLE_ACRE; if (ctrl->ctratt & NVME_CTRL_ATTR_ELBAS) lbafee = NVME_ENABLE_LBAFEE; if (!acre && !lbafee) return 0; host = kzalloc(sizeof(*host), GFP_KERNEL); if (!host) return 0; host->acre = acre; host->lbafee = lbafee; ret = nvme_set_features(ctrl, NVME_FEAT_HOST_BEHAVIOR, 0, host, sizeof(*host), NULL); kfree(host); return ret; } /* * The function checks whether the given total (exlat + enlat) latency of * a power state allows the latter to be used as an APST transition target. * It does so by comparing the latency to the primary and secondary latency * tolerances defined by module params. If there's a match, the corresponding * timeout value is returned and the matching tolerance index (1 or 2) is * reported. */ static bool nvme_apst_get_transition_time(u64 total_latency, u64 *transition_time, unsigned *last_index) { if (total_latency <= apst_primary_latency_tol_us) { if (*last_index == 1) return false; *last_index = 1; *transition_time = apst_primary_timeout_ms; return true; } if (apst_secondary_timeout_ms && total_latency <= apst_secondary_latency_tol_us) { if (*last_index <= 2) return false; *last_index = 2; *transition_time = apst_secondary_timeout_ms; return true; } return false; } /* * APST (Autonomous Power State Transition) lets us program a table of power * state transitions that the controller will perform automatically. * * Depending on module params, one of the two supported techniques will be used: * * - If the parameters provide explicit timeouts and tolerances, they will be * used to build a table with up to 2 non-operational states to transition to. * The default parameter values were selected based on the values used by * Microsoft's and Intel's NVMe drivers. Yet, since we don't implement dynamic * regeneration of the APST table in the event of switching between external * and battery power, the timeouts and tolerances reflect a compromise * between values used by Microsoft for AC and battery scenarios. * - If not, we'll configure the table with a simple heuristic: we are willing * to spend at most 2% of the time transitioning between power states. * Therefore, when running in any given state, we will enter the next * lower-power non-operational state after waiting 50 * (enlat + exlat) * microseconds, as long as that state's exit latency is under the requested * maximum latency. 
* * We will not autonomously enter any non-operational state for which the total * latency exceeds ps_max_latency_us. * * Users can set ps_max_latency_us to zero to turn off APST. */ static int nvme_configure_apst(struct nvme_ctrl *ctrl) { struct nvme_feat_auto_pst *table; unsigned apste = 0; u64 max_lat_us = 0; __le64 target = 0; int max_ps = -1; int state; int ret; unsigned last_lt_index = UINT_MAX; /* * If APST isn't supported or if we haven't been initialized yet, * then don't do anything. */ if (!ctrl->apsta) return 0; if (ctrl->npss > 31) { dev_warn(ctrl->device, "NPSS is invalid; not using APST\n"); return 0; } table = kzalloc(sizeof(*table), GFP_KERNEL); if (!table) return 0; if (!ctrl->apst_enabled || ctrl->ps_max_latency_us == 0) { /* Turn off APST. */ dev_dbg(ctrl->device, "APST disabled\n"); goto done; } /* * Walk through all states from lowest- to highest-power. * According to the spec, lower-numbered states use more power. NPSS, * despite the name, is the index of the lowest-power state, not the * number of states. */ for (state = (int)ctrl->npss; state >= 0; state--) { u64 total_latency_us, exit_latency_us, transition_ms; if (target) table->entries[state] = target; /* * Don't allow transitions to the deepest state if it's quirked * off. */ if (state == ctrl->npss && (ctrl->quirks & NVME_QUIRK_NO_DEEPEST_PS)) continue; /* * Is this state a useful non-operational state for higher-power * states to autonomously transition to? */ if (!(ctrl->psd[state].flags & NVME_PS_FLAGS_NON_OP_STATE)) continue; exit_latency_us = (u64)le32_to_cpu(ctrl->psd[state].exit_lat); if (exit_latency_us > ctrl->ps_max_latency_us) continue; total_latency_us = exit_latency_us + le32_to_cpu(ctrl->psd[state].entry_lat); /* * This state is good. It can be used as the APST idle target * for higher power states. */ if (apst_primary_timeout_ms && apst_primary_latency_tol_us) { if (!nvme_apst_get_transition_time(total_latency_us, &transition_ms, &last_lt_index)) continue; } else { transition_ms = total_latency_us + 19; do_div(transition_ms, 20); if (transition_ms > (1 << 24) - 1) transition_ms = (1 << 24) - 1; } target = cpu_to_le64((state << 3) | (transition_ms << 8)); if (max_ps == -1) max_ps = state; if (total_latency_us > max_lat_us) max_lat_us = total_latency_us; } if (max_ps == -1) dev_dbg(ctrl->device, "APST enabled but no non-operational states are available\n"); else dev_dbg(ctrl->device, "APST enabled: max PS = %d, max round-trip latency = %lluus, table = %*phN\n", max_ps, max_lat_us, (int)sizeof(*table), table); apste = 1; done: ret = nvme_set_features(ctrl, NVME_FEAT_AUTO_PST, apste, table, sizeof(*table), NULL); if (ret) dev_err(ctrl->device, "failed to set APST feature (%d)\n", ret); kfree(table); return ret; } static void nvme_set_latency_tolerance(struct device *dev, s32 val) { struct nvme_ctrl *ctrl = dev_get_drvdata(dev); u64 latency; switch (val) { case PM_QOS_LATENCY_TOLERANCE_NO_CONSTRAINT: case PM_QOS_LATENCY_ANY: latency = U64_MAX; break; default: latency = val; } if (ctrl->ps_max_latency_us != latency) { ctrl->ps_max_latency_us = latency; if (ctrl->state == NVME_CTRL_LIVE) nvme_configure_apst(ctrl); } } struct nvme_core_quirk_entry { /* * NVMe model and firmware strings are padded with spaces. For * simplicity, strings in the quirk table are padded with NULLs * instead. */ u16 vid; const char *mn; const char *fr; unsigned long quirks; }; static const struct nvme_core_quirk_entry core_quirks[] = { { /* * This Toshiba device seems to die using any APST states. 
See: * https://bugs.launchpad.net/ubuntu/+source/linux/+bug/1678184/comments/11 */ .vid = 0x1179, .mn = "THNSF5256GPUK TOSHIBA", .quirks = NVME_QUIRK_NO_APST, }, { /* * This LiteON CL1-3D*-Q11 firmware version has a race * condition associated with actions related to suspend to idle * LiteON has resolved the problem in future firmware */ .vid = 0x14a4, .fr = "22301111", .quirks = NVME_QUIRK_SIMPLE_SUSPEND, }, { /* * This Kioxia CD6-V Series / HPE PE8030 device times out and * aborts I/O during any load, but more easily reproducible * with discards (fstrim). * * The device is left in a state where it is also not possible * to use "nvme set-feature" to disable APST, but booting with * nvme_core.default_ps_max_latency=0 works. */ .vid = 0x1e0f, .mn = "KCD6XVUL6T40", .quirks = NVME_QUIRK_NO_APST, }, { /* * The external Samsung X5 SSD fails initialization without a * delay before checking if it is ready and has a whole set of * other problems. To make this even more interesting, it * shares the PCI ID with internal Samsung 970 Evo Plus that * does not need or want these quirks. */ .vid = 0x144d, .mn = "Samsung Portable SSD X5", .quirks = NVME_QUIRK_DELAY_BEFORE_CHK_RDY | NVME_QUIRK_NO_DEEPEST_PS | NVME_QUIRK_IGNORE_DEV_SUBNQN, } }; /* match is null-terminated but idstr is space-padded. */ static bool string_matches(const char *idstr, const char *match, size_t len) { size_t matchlen; if (!match) return true; matchlen = strlen(match); WARN_ON_ONCE(matchlen > len); if (memcmp(idstr, match, matchlen)) return false; for (; matchlen < len; matchlen++) if (idstr[matchlen] != ' ') return false; return true; } static bool quirk_matches(const struct nvme_id_ctrl *id, const struct nvme_core_quirk_entry *q) { return q->vid == le16_to_cpu(id->vid) && string_matches(id->mn, q->mn, sizeof(id->mn)) && string_matches(id->fr, q->fr, sizeof(id->fr)); } static void nvme_init_subnqn(struct nvme_subsystem *subsys, struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id) { size_t nqnlen; int off; if(!(ctrl->quirks & NVME_QUIRK_IGNORE_DEV_SUBNQN)) { nqnlen = strnlen(id->subnqn, NVMF_NQN_SIZE); if (nqnlen > 0 && nqnlen < NVMF_NQN_SIZE) { strscpy(subsys->subnqn, id->subnqn, NVMF_NQN_SIZE); return; } if (ctrl->vs >= NVME_VS(1, 2, 1)) dev_warn(ctrl->device, "missing or invalid SUBNQN field.\n"); } /* * Generate a "fake" NQN similar to the one in Section 4.5 of the NVMe * Base Specification 2.0. It is slightly different from the format * specified there due to historic reasons, and we can't change it now. 
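 *
 * Hypothetical example of the generated string (all values invented):
 * vid 0x144d, ssvid 0x144d, serial "S1234567890" and model "ACME CTRL"
 * would yield "nqn.2014.08.org.nvmexpress:144d144dS1234567890ACME CTRL",
 * with the serial and model copied space-padded at their full Identify
 * Controller field widths.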
*/ off = snprintf(subsys->subnqn, NVMF_NQN_SIZE, "nqn.2014.08.org.nvmexpress:%04x%04x", le16_to_cpu(id->vid), le16_to_cpu(id->ssvid)); memcpy(subsys->subnqn + off, id->sn, sizeof(id->sn)); off += sizeof(id->sn); memcpy(subsys->subnqn + off, id->mn, sizeof(id->mn)); off += sizeof(id->mn); memset(subsys->subnqn + off, 0, sizeof(subsys->subnqn) - off); } static void nvme_release_subsystem(struct device *dev) { struct nvme_subsystem *subsys = container_of(dev, struct nvme_subsystem, dev); if (subsys->instance >= 0) ida_free(&nvme_instance_ida, subsys->instance); kfree(subsys); } static void nvme_destroy_subsystem(struct kref *ref) { struct nvme_subsystem *subsys = container_of(ref, struct nvme_subsystem, ref); mutex_lock(&nvme_subsystems_lock); list_del(&subsys->entry); mutex_unlock(&nvme_subsystems_lock); ida_destroy(&subsys->ns_ida); device_del(&subsys->dev); put_device(&subsys->dev); } static void nvme_put_subsystem(struct nvme_subsystem *subsys) { kref_put(&subsys->ref, nvme_destroy_subsystem); } static struct nvme_subsystem *__nvme_find_get_subsystem(const char *subsysnqn) { struct nvme_subsystem *subsys; lockdep_assert_held(&nvme_subsystems_lock); /* * Fail matches for discovery subsystems. This results * in each discovery controller bound to a unique subsystem. * This avoids issues with validating controller values * that can only be true when there is a single unique subsystem. * There may be multiple and completely independent entities * that provide discovery controllers. */ if (!strcmp(subsysnqn, NVME_DISC_SUBSYS_NAME)) return NULL; list_for_each_entry(subsys, &nvme_subsystems, entry) { if (strcmp(subsys->subnqn, subsysnqn)) continue; if (!kref_get_unless_zero(&subsys->ref)) continue; return subsys; } return NULL; } static inline bool nvme_discovery_ctrl(struct nvme_ctrl *ctrl) { return ctrl->opts && ctrl->opts->discovery_nqn; } static bool nvme_validate_cntlid(struct nvme_subsystem *subsys, struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id) { struct nvme_ctrl *tmp; lockdep_assert_held(&nvme_subsystems_lock); list_for_each_entry(tmp, &subsys->ctrls, subsys_entry) { if (nvme_state_terminal(tmp)) continue; if (tmp->cntlid == ctrl->cntlid) { dev_err(ctrl->device, "Duplicate cntlid %u with %s, subsys %s, rejecting\n", ctrl->cntlid, dev_name(tmp->device), subsys->subnqn); return false; } if ((id->cmic & NVME_CTRL_CMIC_MULTI_CTRL) || nvme_discovery_ctrl(ctrl)) continue; dev_err(ctrl->device, "Subsystem does not support multiple controllers\n"); return false; } return true; } static int nvme_init_subsystem(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id) { struct nvme_subsystem *subsys, *found; int ret; subsys = kzalloc(sizeof(*subsys), GFP_KERNEL); if (!subsys) return -ENOMEM; subsys->instance = -1; mutex_init(&subsys->lock); kref_init(&subsys->ref); INIT_LIST_HEAD(&subsys->ctrls); INIT_LIST_HEAD(&subsys->nsheads); nvme_init_subnqn(subsys, ctrl, id); memcpy(subsys->serial, id->sn, sizeof(subsys->serial)); memcpy(subsys->model, id->mn, sizeof(subsys->model)); subsys->vendor_id = le16_to_cpu(id->vid); subsys->cmic = id->cmic; /* Versions prior to 1.4 don't necessarily report a valid type */ if (id->cntrltype == NVME_CTRL_DISC || !strcmp(subsys->subnqn, NVME_DISC_SUBSYS_NAME)) subsys->subtype = NVME_NQN_DISC; else subsys->subtype = NVME_NQN_NVME; if (nvme_discovery_ctrl(ctrl) && subsys->subtype != NVME_NQN_DISC) { dev_err(ctrl->device, "Subsystem %s is not a discovery controller", subsys->subnqn); kfree(subsys); return -EINVAL; } subsys->awupf = le16_to_cpu(id->awupf); 
nvme_mpath_default_iopolicy(subsys); subsys->dev.class = nvme_subsys_class; subsys->dev.release = nvme_release_subsystem; subsys->dev.groups = nvme_subsys_attrs_groups; dev_set_name(&subsys->dev, "nvme-subsys%d", ctrl->instance); device_initialize(&subsys->dev); mutex_lock(&nvme_subsystems_lock); found = __nvme_find_get_subsystem(subsys->subnqn); if (found) { put_device(&subsys->dev); subsys = found; if (!nvme_validate_cntlid(subsys, ctrl, id)) { ret = -EINVAL; goto out_put_subsystem; } } else { ret = device_add(&subsys->dev); if (ret) { dev_err(ctrl->device, "failed to register subsystem device.\n"); put_device(&subsys->dev); goto out_unlock; } ida_init(&subsys->ns_ida); list_add_tail(&subsys->entry, &nvme_subsystems); } ret = sysfs_create_link(&subsys->dev.kobj, &ctrl->device->kobj, dev_name(ctrl->device)); if (ret) { dev_err(ctrl->device, "failed to create sysfs link from subsystem.\n"); goto out_put_subsystem; } if (!found) subsys->instance = ctrl->instance; ctrl->subsys = subsys; list_add_tail(&ctrl->subsys_entry, &subsys->ctrls); mutex_unlock(&nvme_subsystems_lock); return 0; out_put_subsystem: nvme_put_subsystem(subsys); out_unlock: mutex_unlock(&nvme_subsystems_lock); return ret; } int nvme_get_log(struct nvme_ctrl *ctrl, u32 nsid, u8 log_page, u8 lsp, u8 csi, void *log, size_t size, u64 offset) { struct nvme_command c = { }; u32 dwlen = nvme_bytes_to_numd(size); c.get_log_page.opcode = nvme_admin_get_log_page; c.get_log_page.nsid = cpu_to_le32(nsid); c.get_log_page.lid = log_page; c.get_log_page.lsp = lsp; c.get_log_page.numdl = cpu_to_le16(dwlen & ((1 << 16) - 1)); c.get_log_page.numdu = cpu_to_le16(dwlen >> 16); c.get_log_page.lpol = cpu_to_le32(lower_32_bits(offset)); c.get_log_page.lpou = cpu_to_le32(upper_32_bits(offset)); c.get_log_page.csi = csi; return nvme_submit_sync_cmd(ctrl->admin_q, &c, log, size); } static int nvme_get_effects_log(struct nvme_ctrl *ctrl, u8 csi, struct nvme_effects_log **log) { struct nvme_effects_log *cel = xa_load(&ctrl->cels, csi); int ret; if (cel) goto out; cel = kzalloc(sizeof(*cel), GFP_KERNEL); if (!cel) return -ENOMEM; ret = nvme_get_log(ctrl, 0x00, NVME_LOG_CMD_EFFECTS, 0, csi, cel, sizeof(*cel), 0); if (ret) { kfree(cel); return ret; } xa_store(&ctrl->cels, csi, cel, GFP_KERNEL); out: *log = cel; return 0; } static inline u32 nvme_mps_to_sectors(struct nvme_ctrl *ctrl, u32 units) { u32 page_shift = NVME_CAP_MPSMIN(ctrl->cap) + 12, val; if (check_shl_overflow(1U, units + page_shift - 9, &val)) return UINT_MAX; return val; } static int nvme_init_non_mdts_limits(struct nvme_ctrl *ctrl) { struct nvme_command c = { }; struct nvme_id_ctrl_nvm *id; int ret; if (ctrl->oncs & NVME_CTRL_ONCS_DSM) { ctrl->max_discard_sectors = UINT_MAX; ctrl->max_discard_segments = NVME_DSM_MAX_RANGES; } else { ctrl->max_discard_sectors = 0; ctrl->max_discard_segments = 0; } /* * Even though NVMe spec explicitly states that MDTS is not applicable * to the write-zeroes, we are cautious and limit the size to the * controllers max_hw_sectors value, which is based on the MDTS field * and possibly other limiting factors. 
*/ if ((ctrl->oncs & NVME_CTRL_ONCS_WRITE_ZEROES) && !(ctrl->quirks & NVME_QUIRK_DISABLE_WRITE_ZEROES)) ctrl->max_zeroes_sectors = ctrl->max_hw_sectors; else ctrl->max_zeroes_sectors = 0; if (ctrl->subsys->subtype != NVME_NQN_NVME || nvme_ctrl_limited_cns(ctrl) || test_bit(NVME_CTRL_SKIP_ID_CNS_CS, &ctrl->flags)) return 0; id = kzalloc(sizeof(*id), GFP_KERNEL); if (!id) return -ENOMEM; c.identify.opcode = nvme_admin_identify; c.identify.cns = NVME_ID_CNS_CS_CTRL; c.identify.csi = NVME_CSI_NVM; ret = nvme_submit_sync_cmd(ctrl->admin_q, &c, id, sizeof(*id)); if (ret) goto free_data; if (id->dmrl) ctrl->max_discard_segments = id->dmrl; ctrl->dmrsl = le32_to_cpu(id->dmrsl); if (id->wzsl) ctrl->max_zeroes_sectors = nvme_mps_to_sectors(ctrl, id->wzsl); free_data: if (ret > 0) set_bit(NVME_CTRL_SKIP_ID_CNS_CS, &ctrl->flags); kfree(id); return ret; } static void nvme_init_known_nvm_effects(struct nvme_ctrl *ctrl) { struct nvme_effects_log *log = ctrl->effects; log->acs[nvme_admin_format_nvm] |= cpu_to_le32(NVME_CMD_EFFECTS_LBCC | NVME_CMD_EFFECTS_NCC | NVME_CMD_EFFECTS_CSE_MASK); log->acs[nvme_admin_sanitize_nvm] |= cpu_to_le32(NVME_CMD_EFFECTS_LBCC | NVME_CMD_EFFECTS_CSE_MASK); /* * The spec says the result of a security receive command depends on * the previous security send command. As such, many vendors log this * command as one to submitted only when no other commands to the same * namespace are outstanding. The intention is to tell the host to * prevent mixing security send and receive. * * This driver can only enforce such exclusive access against IO * queues, though. We are not readily able to enforce such a rule for * two commands to the admin queue, which is the only queue that * matters for this command. * * Rather than blindly freezing the IO queues for this effect that * doesn't even apply to IO, mask it off. */ log->acs[nvme_admin_security_recv] &= cpu_to_le32(~NVME_CMD_EFFECTS_CSE_MASK); log->iocs[nvme_cmd_write] |= cpu_to_le32(NVME_CMD_EFFECTS_LBCC); log->iocs[nvme_cmd_write_zeroes] |= cpu_to_le32(NVME_CMD_EFFECTS_LBCC); log->iocs[nvme_cmd_write_uncor] |= cpu_to_le32(NVME_CMD_EFFECTS_LBCC); } static int nvme_init_effects(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id) { int ret = 0; if (ctrl->effects) return 0; if (id->lpa & NVME_CTRL_LPA_CMD_EFFECTS_LOG) { ret = nvme_get_effects_log(ctrl, NVME_CSI_NVM, &ctrl->effects); if (ret < 0) return ret; } if (!ctrl->effects) { ctrl->effects = kzalloc(sizeof(*ctrl->effects), GFP_KERNEL); if (!ctrl->effects) return -ENOMEM; xa_store(&ctrl->cels, NVME_CSI_NVM, ctrl->effects, GFP_KERNEL); } nvme_init_known_nvm_effects(ctrl); return 0; } static int nvme_init_identify(struct nvme_ctrl *ctrl) { struct nvme_id_ctrl *id; u32 max_hw_sectors; bool prev_apst_enabled; int ret; ret = nvme_identify_ctrl(ctrl, &id); if (ret) { dev_err(ctrl->device, "Identify Controller failed (%d)\n", ret); return -EIO; } if (!(ctrl->ops->flags & NVME_F_FABRICS)) ctrl->cntlid = le16_to_cpu(id->cntlid); if (!ctrl->identified) { unsigned int i; /* * Check for quirks. Quirk can depend on firmware version, * so, in principle, the set of quirks present can change * across a reset. As a possible future enhancement, we * could re-scan for quirks every time we reinitialize * the device, but we'd have to make sure that the driver * behaves intelligently if the quirks change. 
*/ for (i = 0; i < ARRAY_SIZE(core_quirks); i++) { if (quirk_matches(id, &core_quirks[i])) ctrl->quirks |= core_quirks[i].quirks; } ret = nvme_init_subsystem(ctrl, id); if (ret) goto out_free; ret = nvme_init_effects(ctrl, id); if (ret) goto out_free; } memcpy(ctrl->subsys->firmware_rev, id->fr, sizeof(ctrl->subsys->firmware_rev)); if (force_apst && (ctrl->quirks & NVME_QUIRK_NO_DEEPEST_PS)) { dev_warn(ctrl->device, "forcibly allowing all power states due to nvme_core.force_apst -- use at your own risk\n"); ctrl->quirks &= ~NVME_QUIRK_NO_DEEPEST_PS; } ctrl->crdt[0] = le16_to_cpu(id->crdt1); ctrl->crdt[1] = le16_to_cpu(id->crdt2); ctrl->crdt[2] = le16_to_cpu(id->crdt3); ctrl->oacs = le16_to_cpu(id->oacs); ctrl->oncs = le16_to_cpu(id->oncs); ctrl->mtfa = le16_to_cpu(id->mtfa); ctrl->oaes = le32_to_cpu(id->oaes); ctrl->wctemp = le16_to_cpu(id->wctemp); ctrl->cctemp = le16_to_cpu(id->cctemp); atomic_set(&ctrl->abort_limit, id->acl + 1); ctrl->vwc = id->vwc; if (id->mdts) max_hw_sectors = nvme_mps_to_sectors(ctrl, id->mdts); else max_hw_sectors = UINT_MAX; ctrl->max_hw_sectors = min_not_zero(ctrl->max_hw_sectors, max_hw_sectors); nvme_set_queue_limits(ctrl, ctrl->admin_q); ctrl->sgls = le32_to_cpu(id->sgls); ctrl->kas = le16_to_cpu(id->kas); ctrl->max_namespaces = le32_to_cpu(id->mnan); ctrl->ctratt = le32_to_cpu(id->ctratt); ctrl->cntrltype = id->cntrltype; ctrl->dctype = id->dctype; if (id->rtd3e) { /* us -> s */ u32 transition_time = le32_to_cpu(id->rtd3e) / USEC_PER_SEC; ctrl->shutdown_timeout = clamp_t(unsigned int, transition_time, shutdown_timeout, 60); if (ctrl->shutdown_timeout != shutdown_timeout) dev_info(ctrl->device, "Shutdown timeout set to %u seconds\n", ctrl->shutdown_timeout); } else ctrl->shutdown_timeout = shutdown_timeout; ctrl->npss = id->npss; ctrl->apsta = id->apsta; prev_apst_enabled = ctrl->apst_enabled; if (ctrl->quirks & NVME_QUIRK_NO_APST) { if (force_apst && id->apsta) { dev_warn(ctrl->device, "forcibly allowing APST due to nvme_core.force_apst -- use at your own risk\n"); ctrl->apst_enabled = true; } else { ctrl->apst_enabled = false; } } else { ctrl->apst_enabled = id->apsta; } memcpy(ctrl->psd, id->psd, sizeof(ctrl->psd)); if (ctrl->ops->flags & NVME_F_FABRICS) { ctrl->icdoff = le16_to_cpu(id->icdoff); ctrl->ioccsz = le32_to_cpu(id->ioccsz); ctrl->iorcsz = le32_to_cpu(id->iorcsz); ctrl->maxcmd = le16_to_cpu(id->maxcmd); /* * In fabrics we need to verify the cntlid matches the * admin connect */ if (ctrl->cntlid != le16_to_cpu(id->cntlid)) { dev_err(ctrl->device, "Mismatching cntlid: Connect %u vs Identify " "%u, rejecting\n", ctrl->cntlid, le16_to_cpu(id->cntlid)); ret = -EINVAL; goto out_free; } if (!nvme_discovery_ctrl(ctrl) && !ctrl->kas) { dev_err(ctrl->device, "keep-alive support is mandatory for fabrics\n"); ret = -EINVAL; goto out_free; } } else { ctrl->hmpre = le32_to_cpu(id->hmpre); ctrl->hmmin = le32_to_cpu(id->hmmin); ctrl->hmminds = le32_to_cpu(id->hmminds); ctrl->hmmaxd = le16_to_cpu(id->hmmaxd); } ret = nvme_mpath_init_identify(ctrl, id); if (ret < 0) goto out_free; if (ctrl->apst_enabled && !prev_apst_enabled) dev_pm_qos_expose_latency_tolerance(ctrl->device); else if (!ctrl->apst_enabled && prev_apst_enabled) dev_pm_qos_hide_latency_tolerance(ctrl->device); out_free: kfree(id); return ret; } /* * Initialize the cached copies of the Identify data and various controller * register in our nvme_ctrl structure. This should be called as soon as * the admin queue is fully up and running. 
*/ int nvme_init_ctrl_finish(struct nvme_ctrl *ctrl, bool was_suspended) { int ret; ret = ctrl->ops->reg_read32(ctrl, NVME_REG_VS, &ctrl->vs); if (ret) { dev_err(ctrl->device, "Reading VS failed (%d)\n", ret); return ret; } ctrl->sqsize = min_t(u16, NVME_CAP_MQES(ctrl->cap), ctrl->sqsize); if (ctrl->vs >= NVME_VS(1, 1, 0)) ctrl->subsystem = NVME_CAP_NSSRC(ctrl->cap); ret = nvme_init_identify(ctrl); if (ret) return ret; ret = nvme_configure_apst(ctrl); if (ret < 0) return ret; ret = nvme_configure_timestamp(ctrl); if (ret < 0) return ret; ret = nvme_configure_host_options(ctrl); if (ret < 0) return ret; nvme_configure_opal(ctrl, was_suspended); if (!ctrl->identified && !nvme_discovery_ctrl(ctrl)) { /* * Do not return errors unless we are in a controller reset, * the controller works perfectly fine without hwmon. */ ret = nvme_hwmon_init(ctrl); if (ret == -EINTR) return ret; } clear_bit(NVME_CTRL_DIRTY_CAPABILITY, &ctrl->flags); ctrl->identified = true; return 0; } EXPORT_SYMBOL_GPL(nvme_init_ctrl_finish); static int nvme_dev_open(struct inode *inode, struct file *file) { struct nvme_ctrl *ctrl = container_of(inode->i_cdev, struct nvme_ctrl, cdev); switch (ctrl->state) { case NVME_CTRL_LIVE: break; default: return -EWOULDBLOCK; } nvme_get_ctrl(ctrl); if (!try_module_get(ctrl->ops->module)) { nvme_put_ctrl(ctrl); return -EINVAL; } file->private_data = ctrl; return 0; } static int nvme_dev_release(struct inode *inode, struct file *file) { struct nvme_ctrl *ctrl = container_of(inode->i_cdev, struct nvme_ctrl, cdev); module_put(ctrl->ops->module); nvme_put_ctrl(ctrl); return 0; } static const struct file_operations nvme_dev_fops = { .owner = THIS_MODULE, .open = nvme_dev_open, .release = nvme_dev_release, .unlocked_ioctl = nvme_dev_ioctl, .compat_ioctl = compat_ptr_ioctl, .uring_cmd = nvme_dev_uring_cmd, }; static struct nvme_ns_head *nvme_find_ns_head(struct nvme_ctrl *ctrl, unsigned nsid) { struct nvme_ns_head *h; lockdep_assert_held(&ctrl->subsys->lock); list_for_each_entry(h, &ctrl->subsys->nsheads, entry) { /* * Private namespaces can share NSIDs under some conditions. * In that case we can't use the same ns_head for namespaces * with the same NSID. 
*/ if (h->ns_id != nsid || !nvme_is_unique_nsid(ctrl, h)) continue; if (!list_empty(&h->list) && nvme_tryget_ns_head(h)) return h; } return NULL; } static int nvme_subsys_check_duplicate_ids(struct nvme_subsystem *subsys, struct nvme_ns_ids *ids) { bool has_uuid = !uuid_is_null(&ids->uuid); bool has_nguid = memchr_inv(ids->nguid, 0, sizeof(ids->nguid)); bool has_eui64 = memchr_inv(ids->eui64, 0, sizeof(ids->eui64)); struct nvme_ns_head *h; lockdep_assert_held(&subsys->lock); list_for_each_entry(h, &subsys->nsheads, entry) { if (has_uuid && uuid_equal(&ids->uuid, &h->ids.uuid)) return -EINVAL; if (has_nguid && memcmp(&ids->nguid, &h->ids.nguid, sizeof(ids->nguid)) == 0) return -EINVAL; if (has_eui64 && memcmp(&ids->eui64, &h->ids.eui64, sizeof(ids->eui64)) == 0) return -EINVAL; } return 0; } static void nvme_cdev_rel(struct device *dev) { ida_free(&nvme_ns_chr_minor_ida, MINOR(dev->devt)); } void nvme_cdev_del(struct cdev *cdev, struct device *cdev_device) { cdev_device_del(cdev, cdev_device); put_device(cdev_device); } int nvme_cdev_add(struct cdev *cdev, struct device *cdev_device, const struct file_operations *fops, struct module *owner) { int minor, ret; minor = ida_alloc(&nvme_ns_chr_minor_ida, GFP_KERNEL); if (minor < 0) return minor; cdev_device->devt = MKDEV(MAJOR(nvme_ns_chr_devt), minor); cdev_device->class = nvme_ns_chr_class; cdev_device->release = nvme_cdev_rel; device_initialize(cdev_device); cdev_init(cdev, fops); cdev->owner = owner; ret = cdev_device_add(cdev, cdev_device); if (ret) put_device(cdev_device); return ret; } static int nvme_ns_chr_open(struct inode *inode, struct file *file) { return nvme_ns_open(container_of(inode->i_cdev, struct nvme_ns, cdev)); } static int nvme_ns_chr_release(struct inode *inode, struct file *file) { nvme_ns_release(container_of(inode->i_cdev, struct nvme_ns, cdev)); return 0; } static const struct file_operations nvme_ns_chr_fops = { .owner = THIS_MODULE, .open = nvme_ns_chr_open, .release = nvme_ns_chr_release, .unlocked_ioctl = nvme_ns_chr_ioctl, .compat_ioctl = compat_ptr_ioctl, .uring_cmd = nvme_ns_chr_uring_cmd, .uring_cmd_iopoll = nvme_ns_chr_uring_cmd_iopoll, }; static int nvme_add_ns_cdev(struct nvme_ns *ns) { int ret; ns->cdev_device.parent = ns->ctrl->device; ret = dev_set_name(&ns->cdev_device, "ng%dn%d", ns->ctrl->instance, ns->head->instance); if (ret) return ret; return nvme_cdev_add(&ns->cdev, &ns->cdev_device, &nvme_ns_chr_fops, ns->ctrl->ops->module); } static struct nvme_ns_head *nvme_alloc_ns_head(struct nvme_ctrl *ctrl, struct nvme_ns_info *info) { struct nvme_ns_head *head; size_t size = sizeof(*head); int ret = -ENOMEM; #ifdef CONFIG_NVME_MULTIPATH size += num_possible_nodes() * sizeof(struct nvme_ns *); #endif head = kzalloc(size, GFP_KERNEL); if (!head) goto out; ret = ida_alloc_min(&ctrl->subsys->ns_ida, 1, GFP_KERNEL); if (ret < 0) goto out_free_head; head->instance = ret; INIT_LIST_HEAD(&head->list); ret = init_srcu_struct(&head->srcu); if (ret) goto out_ida_remove; head->subsys = ctrl->subsys; head->ns_id = info->nsid; head->ids = info->ids; head->shared = info->is_shared; kref_init(&head->ref); if (head->ids.csi) { ret = nvme_get_effects_log(ctrl, head->ids.csi, &head->effects); if (ret) goto out_cleanup_srcu; } else head->effects = ctrl->effects; ret = nvme_mpath_alloc_disk(ctrl, head); if (ret) goto out_cleanup_srcu; list_add_tail(&head->entry, &ctrl->subsys->nsheads); kref_get(&ctrl->subsys->ref); return head; out_cleanup_srcu: cleanup_srcu_struct(&head->srcu); out_ida_remove: ida_free(&ctrl->subsys->ns_ida, 
head->instance); out_free_head: kfree(head); out: if (ret > 0) ret = blk_status_to_errno(nvme_error_status(ret)); return ERR_PTR(ret); } static int nvme_global_check_duplicate_ids(struct nvme_subsystem *this, struct nvme_ns_ids *ids) { struct nvme_subsystem *s; int ret = 0; /* * Note that this check is racy as we try to avoid holding the global * lock over the whole ns_head creation. But it is only intended as * a sanity check anyway. */ mutex_lock(&nvme_subsystems_lock); list_for_each_entry(s, &nvme_subsystems, entry) { if (s == this) continue; mutex_lock(&s->lock); ret = nvme_subsys_check_duplicate_ids(s, ids); mutex_unlock(&s->lock); if (ret) break; } mutex_unlock(&nvme_subsystems_lock); return ret; } static int nvme_init_ns_head(struct nvme_ns *ns, struct nvme_ns_info *info) { struct nvme_ctrl *ctrl = ns->ctrl; struct nvme_ns_head *head = NULL; int ret; ret = nvme_global_check_duplicate_ids(ctrl->subsys, &info->ids); if (ret) { /* * We've found two different namespaces on two different * subsystems that report the same ID. This is pretty nasty * for anything that actually requires unique device * identification. In the kernel we need this for multipathing, * and in user space the /dev/disk/by-id/ links rely on it. * * If the device also claims to be multi-path capable, back off * here now and refuse to probe the second device, as this is a * recipe for data corruption. If not, this is probably a * cheap consumer device if on the PCIe bus, so let the user * proceed and use the shiny toy, but warn that with changing * probing order (which due to our async probing could just be * device taking longer to startup) the other device could show * up at any time. */ nvme_print_device_info(ctrl); if ((ns->ctrl->ops->flags & NVME_F_FABRICS) || /* !PCIe */ ((ns->ctrl->subsys->cmic & NVME_CTRL_CMIC_MULTI_CTRL) && info->is_shared)) { dev_err(ctrl->device, "ignoring nsid %d because of duplicate IDs\n", info->nsid); return ret; } dev_err(ctrl->device, "clearing duplicate IDs for nsid %d\n", info->nsid); dev_err(ctrl->device, "use of /dev/disk/by-id/ may cause data corruption\n"); memset(&info->ids.nguid, 0, sizeof(info->ids.nguid)); memset(&info->ids.uuid, 0, sizeof(info->ids.uuid)); memset(&info->ids.eui64, 0, sizeof(info->ids.eui64)); ctrl->quirks |= NVME_QUIRK_BOGUS_NID; } mutex_lock(&ctrl->subsys->lock); head = nvme_find_ns_head(ctrl, info->nsid); if (!head) { ret = nvme_subsys_check_duplicate_ids(ctrl->subsys, &info->ids); if (ret) { dev_err(ctrl->device, "duplicate IDs in subsystem for nsid %d\n", info->nsid); goto out_unlock; } head = nvme_alloc_ns_head(ctrl, info); if (IS_ERR(head)) { ret = PTR_ERR(head); goto out_unlock; } } else { ret = -EINVAL; if (!info->is_shared || !head->shared) { dev_err(ctrl->device, "Duplicate unshared namespace %d\n", info->nsid); goto out_put_ns_head; } if (!nvme_ns_ids_equal(&head->ids, &info->ids)) { dev_err(ctrl->device, "IDs don't match for shared namespace %d\n", info->nsid); goto out_put_ns_head; } if (!multipath) { dev_warn(ctrl->device, "Found shared namespace %d, but multipathing not supported.\n", info->nsid); dev_warn_once(ctrl->device, "Support for shared namespaces without CONFIG_NVME_MULTIPATH is deprecated and will be removed in Linux 6.0.\n"); } } list_add_tail_rcu(&ns->siblings, &head->list); ns->head = head; mutex_unlock(&ctrl->subsys->lock); return 0; out_put_ns_head: nvme_put_ns_head(head); out_unlock: mutex_unlock(&ctrl->subsys->lock); return ret; } struct nvme_ns *nvme_find_get_ns(struct nvme_ctrl *ctrl, unsigned nsid) { struct nvme_ns *ns, *ret
= NULL; down_read(&ctrl->namespaces_rwsem); list_for_each_entry(ns, &ctrl->namespaces, list) { if (ns->head->ns_id == nsid) { if (!nvme_get_ns(ns)) continue; ret = ns; break; } if (ns->head->ns_id > nsid) break; } up_read(&ctrl->namespaces_rwsem); return ret; } EXPORT_SYMBOL_NS_GPL(nvme_find_get_ns, NVME_TARGET_PASSTHRU); /* * Add the namespace to the controller list while keeping the list ordered. */ static void nvme_ns_add_to_ctrl_list(struct nvme_ns *ns) { struct nvme_ns *tmp; list_for_each_entry_reverse(tmp, &ns->ctrl->namespaces, list) { if (tmp->head->ns_id < ns->head->ns_id) { list_add(&ns->list, &tmp->list); return; } } list_add(&ns->list, &ns->ctrl->namespaces); } static void nvme_alloc_ns(struct nvme_ctrl *ctrl, struct nvme_ns_info *info) { struct nvme_ns *ns; struct gendisk *disk; int node = ctrl->numa_node; ns = kzalloc_node(sizeof(*ns), GFP_KERNEL, node); if (!ns) return; disk = blk_mq_alloc_disk(ctrl->tagset, ns); if (IS_ERR(disk)) goto out_free_ns; disk->fops = &nvme_bdev_ops; disk->private_data = ns; ns->disk = disk; ns->queue = disk->queue; if (ctrl->opts && ctrl->opts->data_digest) blk_queue_flag_set(QUEUE_FLAG_STABLE_WRITES, ns->queue); blk_queue_flag_set(QUEUE_FLAG_NONROT, ns->queue); if (ctrl->ops->supports_pci_p2pdma && ctrl->ops->supports_pci_p2pdma(ctrl)) blk_queue_flag_set(QUEUE_FLAG_PCI_P2PDMA, ns->queue); ns->ctrl = ctrl; kref_init(&ns->kref); if (nvme_init_ns_head(ns, info)) goto out_cleanup_disk; /* * If multipathing is enabled, the device name for all disks and not * just those that represent shared namespaces needs to be based on the * subsystem instance. Using the controller instance for private * namespaces could lead to naming collisions between shared and private * namespaces if they don't use a common numbering scheme. * * If multipathing is not enabled, disk names must use the controller * instance as shared namespaces will show up as multiple block * devices. */ if (nvme_ns_head_multipath(ns->head)) { sprintf(disk->disk_name, "nvme%dc%dn%d", ctrl->subsys->instance, ctrl->instance, ns->head->instance); disk->flags |= GENHD_FL_HIDDEN; } else if (multipath) { sprintf(disk->disk_name, "nvme%dn%d", ctrl->subsys->instance, ns->head->instance); } else { sprintf(disk->disk_name, "nvme%dn%d", ctrl->instance, ns->head->instance); } if (nvme_update_ns_info(ns, info)) goto out_unlink_ns; down_write(&ctrl->namespaces_rwsem); nvme_ns_add_to_ctrl_list(ns); up_write(&ctrl->namespaces_rwsem); nvme_get_ctrl(ctrl); if (device_add_disk(ctrl->device, ns->disk, nvme_ns_id_attr_groups)) goto out_cleanup_ns_from_list; if (!nvme_ns_head_multipath(ns->head)) nvme_add_ns_cdev(ns); nvme_mpath_add_disk(ns, info->anagrpid); nvme_fault_inject_init(&ns->fault_inject, ns->disk->disk_name); return; out_cleanup_ns_from_list: nvme_put_ctrl(ctrl); down_write(&ctrl->namespaces_rwsem); list_del_init(&ns->list); up_write(&ctrl->namespaces_rwsem); out_unlink_ns: mutex_lock(&ctrl->subsys->lock); list_del_rcu(&ns->siblings); if (list_empty(&ns->head->list)) list_del_init(&ns->head->entry); mutex_unlock(&ctrl->subsys->lock); nvme_put_ns_head(ns->head); out_cleanup_disk: put_disk(disk); out_free_ns: kfree(ns); } static void nvme_ns_remove(struct nvme_ns *ns) { bool last_path = false; if (test_and_set_bit(NVME_NS_REMOVING, &ns->flags)) return; clear_bit(NVME_NS_READY, &ns->flags); set_capacity(ns->disk, 0); nvme_fault_inject_fini(&ns->fault_inject); /* * Ensure that !NVME_NS_READY is seen by other threads to prevent * this ns going back into current_path. 
*/ synchronize_srcu(&ns->head->srcu); /* wait for concurrent submissions */ if (nvme_mpath_clear_current_path(ns)) synchronize_srcu(&ns->head->srcu); mutex_lock(&ns->ctrl->subsys->lock); list_del_rcu(&ns->siblings); if (list_empty(&ns->head->list)) { list_del_init(&ns->head->entry); last_path = true; } mutex_unlock(&ns->ctrl->subsys->lock); /* guarantee not available in head->list */ synchronize_srcu(&ns->head->srcu); if (!nvme_ns_head_multipath(ns->head)) nvme_cdev_del(&ns->cdev, &ns->cdev_device); del_gendisk(ns->disk); down_write(&ns->ctrl->namespaces_rwsem); list_del_init(&ns->list); up_write(&ns->ctrl->namespaces_rwsem); if (last_path) nvme_mpath_shutdown_disk(ns->head); nvme_put_ns(ns); } static void nvme_ns_remove_by_nsid(struct nvme_ctrl *ctrl, u32 nsid) { struct nvme_ns *ns = nvme_find_get_ns(ctrl, nsid); if (ns) { nvme_ns_remove(ns); nvme_put_ns(ns); } } static void nvme_validate_ns(struct nvme_ns *ns, struct nvme_ns_info *info) { int ret = NVME_SC_INVALID_NS | NVME_SC_DNR; if (!nvme_ns_ids_equal(&ns->head->ids, &info->ids)) { dev_err(ns->ctrl->device, "identifiers changed for nsid %d\n", ns->head->ns_id); goto out; } ret = nvme_update_ns_info(ns, info); out: /* * Only remove the namespace if we got a fatal error back from the * device, otherwise ignore the error and just move on. * * TODO: we should probably schedule a delayed retry here. */ if (ret > 0 && (ret & NVME_SC_DNR)) nvme_ns_remove(ns); } static void nvme_scan_ns(struct nvme_ctrl *ctrl, unsigned nsid) { struct nvme_ns_info info = { .nsid = nsid }; struct nvme_ns *ns; int ret; if (nvme_identify_ns_descs(ctrl, &info)) return; if (info.ids.csi != NVME_CSI_NVM && !nvme_multi_css(ctrl)) { dev_warn(ctrl->device, "command set not reported for nsid: %d\n", nsid); return; } /* * If available, try to use the Command Set Independent Identify Namespace * data structure to find all the generic information that is needed to * set up a namespace. If not, fall back to the legacy version. */ if ((ctrl->cap & NVME_CAP_CRMS_CRIMS) || (info.ids.csi != NVME_CSI_NVM && info.ids.csi != NVME_CSI_ZNS)) ret = nvme_ns_info_from_id_cs_indep(ctrl, &info); else ret = nvme_ns_info_from_identify(ctrl, &info); if (info.is_removed) nvme_ns_remove_by_nsid(ctrl, nsid); /* * Ignore the namespace if it is not ready. We will get an AEN once it * becomes ready and restart the scan.
*/ if (ret || !info.is_ready) return; ns = nvme_find_get_ns(ctrl, nsid); if (ns) { nvme_validate_ns(ns, &info); nvme_put_ns(ns); } else { nvme_alloc_ns(ctrl, &info); } } static void nvme_remove_invalid_namespaces(struct nvme_ctrl *ctrl, unsigned nsid) { struct nvme_ns *ns, *next; LIST_HEAD(rm_list); down_write(&ctrl->namespaces_rwsem); list_for_each_entry_safe(ns, next, &ctrl->namespaces, list) { if (ns->head->ns_id > nsid) list_move_tail(&ns->list, &rm_list); } up_write(&ctrl->namespaces_rwsem); list_for_each_entry_safe(ns, next, &rm_list, list) nvme_ns_remove(ns); } static int nvme_scan_ns_list(struct nvme_ctrl *ctrl) { const int nr_entries = NVME_IDENTIFY_DATA_SIZE / sizeof(__le32); __le32 *ns_list; u32 prev = 0; int ret = 0, i; ns_list = kzalloc(NVME_IDENTIFY_DATA_SIZE, GFP_KERNEL); if (!ns_list) return -ENOMEM; for (;;) { struct nvme_command cmd = { .identify.opcode = nvme_admin_identify, .identify.cns = NVME_ID_CNS_NS_ACTIVE_LIST, .identify.nsid = cpu_to_le32(prev), }; ret = nvme_submit_sync_cmd(ctrl->admin_q, &cmd, ns_list, NVME_IDENTIFY_DATA_SIZE); if (ret) { dev_warn(ctrl->device, "Identify NS List failed (status=0x%x)\n", ret); goto free; } for (i = 0; i < nr_entries; i++) { u32 nsid = le32_to_cpu(ns_list[i]); if (!nsid) /* end of the list? */ goto out; nvme_scan_ns(ctrl, nsid); while (++prev < nsid) nvme_ns_remove_by_nsid(ctrl, prev); } } out: nvme_remove_invalid_namespaces(ctrl, prev); free: kfree(ns_list); return ret; } static void nvme_scan_ns_sequential(struct nvme_ctrl *ctrl) { struct nvme_id_ctrl *id; u32 nn, i; if (nvme_identify_ctrl(ctrl, &id)) return; nn = le32_to_cpu(id->nn); kfree(id); for (i = 1; i <= nn; i++) nvme_scan_ns(ctrl, i); nvme_remove_invalid_namespaces(ctrl, nn); } static void nvme_clear_changed_ns_log(struct nvme_ctrl *ctrl) { size_t log_size = NVME_MAX_CHANGED_NAMESPACES * sizeof(__le32); __le32 *log; int error; log = kzalloc(log_size, GFP_KERNEL); if (!log) return; /* * We need to read the log to clear the AEN, but we don't want to rely * on it for the changed namespace information as userspace could have * raced with us in reading the log page, which could cause us to miss * updates. */ error = nvme_get_log(ctrl, NVME_NSID_ALL, NVME_LOG_CHANGED_NS, 0, NVME_CSI_NVM, log, log_size, 0); if (error) dev_warn(ctrl->device, "reading changed ns log failed: %d\n", error); kfree(log); } static void nvme_scan_work(struct work_struct *work) { struct nvme_ctrl *ctrl = container_of(work, struct nvme_ctrl, scan_work); int ret; /* No tagset on a live ctrl means IO queues could not be created */ if (ctrl->state != NVME_CTRL_LIVE || !ctrl->tagset) return; /* * Identify controller limits can change at controller reset due to * new firmware download; even though it is not common, we cannot ignore * such a scenario. Controller's non-mdts limits are reported in the unit * of logical blocks that is dependent on the format of the attached * namespace. Hence re-read the limits at the time of ns allocation. */ ret = nvme_init_non_mdts_limits(ctrl); if (ret < 0) { dev_warn(ctrl->device, "reading non-mdts-limits failed: %d\n", ret); return; } if (test_and_clear_bit(NVME_AER_NOTICE_NS_CHANGED, &ctrl->events)) { dev_info(ctrl->device, "rescanning namespaces.\n"); nvme_clear_changed_ns_log(ctrl); } mutex_lock(&ctrl->scan_lock); if (nvme_ctrl_limited_cns(ctrl)) { nvme_scan_ns_sequential(ctrl); } else { /* * Fall back to sequential scan if DNR is set to handle broken * devices which should support Identify NS List (as per the VS * they report) but don't actually support it.
*/ ret = nvme_scan_ns_list(ctrl); if (ret > 0 && ret & NVME_SC_DNR) nvme_scan_ns_sequential(ctrl); } mutex_unlock(&ctrl->scan_lock); } /* * This function iterates the namespace list unlocked to allow recovery from * controller failure. It is up to the caller to ensure the namespace list is * not modified by scan work while this function is executing. */ void nvme_remove_namespaces(struct nvme_ctrl *ctrl) { struct nvme_ns *ns, *next; LIST_HEAD(ns_list); /* * make sure to requeue I/O to all namespaces as these * might result from the scan itself and must complete * for the scan_work to make progress */ nvme_mpath_clear_ctrl_paths(ctrl); /* * Unquiesce io queues so any pending IO won't hang, especially * those submitted from scan work */ nvme_unquiesce_io_queues(ctrl); /* prevent racing with ns scanning */ flush_work(&ctrl->scan_work); /* * The dead states indicates the controller was not gracefully * disconnected. In that case, we won't be able to flush any data while * removing the namespaces' disks; fail all the queues now to avoid * potentially having to clean up the failed sync later. */ if (ctrl->state == NVME_CTRL_DEAD) nvme_mark_namespaces_dead(ctrl); /* this is a no-op when called from the controller reset handler */ nvme_change_ctrl_state(ctrl, NVME_CTRL_DELETING_NOIO); down_write(&ctrl->namespaces_rwsem); list_splice_init(&ctrl->namespaces, &ns_list); up_write(&ctrl->namespaces_rwsem); list_for_each_entry_safe(ns, next, &ns_list, list) nvme_ns_remove(ns); } EXPORT_SYMBOL_GPL(nvme_remove_namespaces); static int nvme_class_uevent(const struct device *dev, struct kobj_uevent_env *env) { const struct nvme_ctrl *ctrl = container_of(dev, struct nvme_ctrl, ctrl_device); struct nvmf_ctrl_options *opts = ctrl->opts; int ret; ret = add_uevent_var(env, "NVME_TRTYPE=%s", ctrl->ops->name); if (ret) return ret; if (opts) { ret = add_uevent_var(env, "NVME_TRADDR=%s", opts->traddr); if (ret) return ret; ret = add_uevent_var(env, "NVME_TRSVCID=%s", opts->trsvcid ?: "none"); if (ret) return ret; ret = add_uevent_var(env, "NVME_HOST_TRADDR=%s", opts->host_traddr ?: "none"); if (ret) return ret; ret = add_uevent_var(env, "NVME_HOST_IFACE=%s", opts->host_iface ?: "none"); } return ret; } static void nvme_change_uevent(struct nvme_ctrl *ctrl, char *envdata) { char *envp[2] = { envdata, NULL }; kobject_uevent_env(&ctrl->device->kobj, KOBJ_CHANGE, envp); } static void nvme_aen_uevent(struct nvme_ctrl *ctrl) { char *envp[2] = { NULL, NULL }; u32 aen_result = ctrl->aen_result; ctrl->aen_result = 0; if (!aen_result) return; envp[0] = kasprintf(GFP_KERNEL, "NVME_AEN=%#08x", aen_result); if (!envp[0]) return; kobject_uevent_env(&ctrl->device->kobj, KOBJ_CHANGE, envp); kfree(envp[0]); } static void nvme_async_event_work(struct work_struct *work) { struct nvme_ctrl *ctrl = container_of(work, struct nvme_ctrl, async_event_work); nvme_aen_uevent(ctrl); /* * The transport drivers must guarantee AER submission here is safe by * flushing ctrl async_event_work after changing the controller state * from LIVE and before freeing the admin queue. 
*/ if (ctrl->state == NVME_CTRL_LIVE) ctrl->ops->submit_async_event(ctrl); } static bool nvme_ctrl_pp_status(struct nvme_ctrl *ctrl) { u32 csts; if (ctrl->ops->reg_read32(ctrl, NVME_REG_CSTS, &csts)) return false; if (csts == ~0) return false; return ((ctrl->ctrl_config & NVME_CC_ENABLE) && (csts & NVME_CSTS_PP)); } static void nvme_get_fw_slot_info(struct nvme_ctrl *ctrl) { struct nvme_fw_slot_info_log *log; log = kmalloc(sizeof(*log), GFP_KERNEL); if (!log) return; if (nvme_get_log(ctrl, NVME_NSID_ALL, NVME_LOG_FW_SLOT, 0, NVME_CSI_NVM, log, sizeof(*log), 0)) dev_warn(ctrl->device, "Get FW SLOT INFO log error\n"); kfree(log); } static void nvme_fw_act_work(struct work_struct *work) { struct nvme_ctrl *ctrl = container_of(work, struct nvme_ctrl, fw_act_work); unsigned long fw_act_timeout; if (ctrl->mtfa) fw_act_timeout = jiffies + msecs_to_jiffies(ctrl->mtfa * 100); else fw_act_timeout = jiffies + msecs_to_jiffies(admin_timeout * 1000); nvme_quiesce_io_queues(ctrl); while (nvme_ctrl_pp_status(ctrl)) { if (time_after(jiffies, fw_act_timeout)) { dev_warn(ctrl->device, "Fw activation timeout, reset controller\n"); nvme_try_sched_reset(ctrl); return; } msleep(100); } if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_LIVE)) return; nvme_unquiesce_io_queues(ctrl); /* read FW slot information to clear the AER */ nvme_get_fw_slot_info(ctrl); queue_work(nvme_wq, &ctrl->async_event_work); } static u32 nvme_aer_type(u32 result) { return result & 0x7; } static u32 nvme_aer_subtype(u32 result) { return (result & 0xff00) >> 8; } static bool nvme_handle_aen_notice(struct nvme_ctrl *ctrl, u32 result) { u32 aer_notice_type = nvme_aer_subtype(result); bool requeue = true; switch (aer_notice_type) { case NVME_AER_NOTICE_NS_CHANGED: set_bit(NVME_AER_NOTICE_NS_CHANGED, &ctrl->events); nvme_queue_scan(ctrl); break; case NVME_AER_NOTICE_FW_ACT_STARTING: /* * We are (ab)using the RESETTING state to prevent subsequent * recovery actions from interfering with the controller's * firmware activation. */ if (nvme_change_ctrl_state(ctrl, NVME_CTRL_RESETTING)) { nvme_auth_stop(ctrl); requeue = false; queue_work(nvme_wq, &ctrl->fw_act_work); } break; #ifdef CONFIG_NVME_MULTIPATH case NVME_AER_NOTICE_ANA: if (!ctrl->ana_log_buf) break; queue_work(nvme_wq, &ctrl->ana_work); break; #endif case NVME_AER_NOTICE_DISC_CHANGED: ctrl->aen_result = result; break; default: dev_warn(ctrl->device, "async event result %08x\n", result); } return requeue; } static void nvme_handle_aer_persistent_error(struct nvme_ctrl *ctrl) { dev_warn(ctrl->device, "resetting controller due to AER\n"); nvme_reset_ctrl(ctrl); } void nvme_complete_async_event(struct nvme_ctrl *ctrl, __le16 status, volatile union nvme_result *res) { u32 result = le32_to_cpu(res->u32); u32 aer_type = nvme_aer_type(result); u32 aer_subtype = nvme_aer_subtype(result); bool requeue = true; if (le16_to_cpu(status) >> 1 != NVME_SC_SUCCESS) return; trace_nvme_async_event(ctrl, result); switch (aer_type) { case NVME_AER_NOTICE: requeue = nvme_handle_aen_notice(ctrl, result); break; case NVME_AER_ERROR: /* * For a persistent internal error, don't run async_event_work * to submit a new AER. The controller reset will do it. 
*/ if (aer_subtype == NVME_AER_ERROR_PERSIST_INT_ERR) { nvme_handle_aer_persistent_error(ctrl); return; } fallthrough; case NVME_AER_SMART: case NVME_AER_CSS: case NVME_AER_VS: ctrl->aen_result = result; break; default: break; } if (requeue) queue_work(nvme_wq, &ctrl->async_event_work); } EXPORT_SYMBOL_GPL(nvme_complete_async_event); int nvme_alloc_admin_tag_set(struct nvme_ctrl *ctrl, struct blk_mq_tag_set *set, const struct blk_mq_ops *ops, unsigned int cmd_size) { int ret; memset(set, 0, sizeof(*set)); set->ops = ops; set->queue_depth = NVME_AQ_MQ_TAG_DEPTH; if (ctrl->ops->flags & NVME_F_FABRICS) set->reserved_tags = NVMF_RESERVED_TAGS; set->numa_node = ctrl->numa_node; set->flags = BLK_MQ_F_NO_SCHED; if (ctrl->ops->flags & NVME_F_BLOCKING) set->flags |= BLK_MQ_F_BLOCKING; set->cmd_size = cmd_size; set->driver_data = ctrl; set->nr_hw_queues = 1; set->timeout = NVME_ADMIN_TIMEOUT; ret = blk_mq_alloc_tag_set(set); if (ret) return ret; ctrl->admin_q = blk_mq_init_queue(set); if (IS_ERR(ctrl->admin_q)) { ret = PTR_ERR(ctrl->admin_q); goto out_free_tagset; } if (ctrl->ops->flags & NVME_F_FABRICS) { ctrl->fabrics_q = blk_mq_init_queue(set); if (IS_ERR(ctrl->fabrics_q)) { ret = PTR_ERR(ctrl->fabrics_q); goto out_cleanup_admin_q; } } ctrl->admin_tagset = set; return 0; out_cleanup_admin_q: blk_mq_destroy_queue(ctrl->admin_q); blk_put_queue(ctrl->admin_q); out_free_tagset: blk_mq_free_tag_set(set); ctrl->admin_q = NULL; ctrl->fabrics_q = NULL; return ret; } EXPORT_SYMBOL_GPL(nvme_alloc_admin_tag_set); void nvme_remove_admin_tag_set(struct nvme_ctrl *ctrl) { blk_mq_destroy_queue(ctrl->admin_q); blk_put_queue(ctrl->admin_q); if (ctrl->ops->flags & NVME_F_FABRICS) { blk_mq_destroy_queue(ctrl->fabrics_q); blk_put_queue(ctrl->fabrics_q); } blk_mq_free_tag_set(ctrl->admin_tagset); } EXPORT_SYMBOL_GPL(nvme_remove_admin_tag_set); int nvme_alloc_io_tag_set(struct nvme_ctrl *ctrl, struct blk_mq_tag_set *set, const struct blk_mq_ops *ops, unsigned int nr_maps, unsigned int cmd_size) { int ret; memset(set, 0, sizeof(*set)); set->ops = ops; set->queue_depth = min_t(unsigned, ctrl->sqsize, BLK_MQ_MAX_DEPTH - 1); /* * Some Apple controllers requires tags to be unique across admin and * the (only) I/O queue, so reserve the first 32 tags of the I/O queue. 
*/ if (ctrl->quirks & NVME_QUIRK_SHARED_TAGS) set->reserved_tags = NVME_AQ_DEPTH; else if (ctrl->ops->flags & NVME_F_FABRICS) set->reserved_tags = NVMF_RESERVED_TAGS; set->numa_node = ctrl->numa_node; set->flags = BLK_MQ_F_SHOULD_MERGE; if (ctrl->ops->flags & NVME_F_BLOCKING) set->flags |= BLK_MQ_F_BLOCKING; set->cmd_size = cmd_size; set->driver_data = ctrl; set->nr_hw_queues = ctrl->queue_count - 1; set->timeout = NVME_IO_TIMEOUT; set->nr_maps = nr_maps; ret = blk_mq_alloc_tag_set(set); if (ret) return ret; if (ctrl->ops->flags & NVME_F_FABRICS) { ctrl->connect_q = blk_mq_init_queue(set); if (IS_ERR(ctrl->connect_q)) { ret = PTR_ERR(ctrl->connect_q); goto out_free_tag_set; } blk_queue_flag_set(QUEUE_FLAG_SKIP_TAGSET_QUIESCE, ctrl->connect_q); } ctrl->tagset = set; return 0; out_free_tag_set: blk_mq_free_tag_set(set); ctrl->connect_q = NULL; return ret; } EXPORT_SYMBOL_GPL(nvme_alloc_io_tag_set); void nvme_remove_io_tag_set(struct nvme_ctrl *ctrl) { if (ctrl->ops->flags & NVME_F_FABRICS) { blk_mq_destroy_queue(ctrl->connect_q); blk_put_queue(ctrl->connect_q); } blk_mq_free_tag_set(ctrl->tagset); } EXPORT_SYMBOL_GPL(nvme_remove_io_tag_set); void nvme_stop_ctrl(struct nvme_ctrl *ctrl) { nvme_mpath_stop(ctrl); nvme_auth_stop(ctrl); nvme_stop_keep_alive(ctrl); nvme_stop_failfast_work(ctrl); flush_work(&ctrl->async_event_work); cancel_work_sync(&ctrl->fw_act_work); if (ctrl->ops->stop_ctrl) ctrl->ops->stop_ctrl(ctrl); } EXPORT_SYMBOL_GPL(nvme_stop_ctrl); void nvme_start_ctrl(struct nvme_ctrl *ctrl) { nvme_start_keep_alive(ctrl); nvme_enable_aen(ctrl); /* * persistent discovery controllers need to send an indication to userspace * to re-read the discovery log page to learn about possible changes * that were missed. We identify persistent discovery controllers by * checking that they started once before, hence are reconnecting back. */ if (test_bit(NVME_CTRL_STARTED_ONCE, &ctrl->flags) && nvme_discovery_ctrl(ctrl)) nvme_change_uevent(ctrl, "NVME_EVENT=rediscover"); if (ctrl->queue_count > 1) { nvme_queue_scan(ctrl); nvme_unquiesce_io_queues(ctrl); nvme_mpath_update(ctrl); } nvme_change_uevent(ctrl, "NVME_EVENT=connected"); set_bit(NVME_CTRL_STARTED_ONCE, &ctrl->flags); } EXPORT_SYMBOL_GPL(nvme_start_ctrl); void nvme_uninit_ctrl(struct nvme_ctrl *ctrl) { nvme_hwmon_exit(ctrl); nvme_fault_inject_fini(&ctrl->fault_inject); dev_pm_qos_hide_latency_tolerance(ctrl->device); cdev_device_del(&ctrl->cdev, ctrl->device); nvme_put_ctrl(ctrl); } EXPORT_SYMBOL_GPL(nvme_uninit_ctrl); static void nvme_free_cels(struct nvme_ctrl *ctrl) { struct nvme_effects_log *cel; unsigned long i; xa_for_each(&ctrl->cels, i, cel) { xa_erase(&ctrl->cels, i); kfree(cel); } xa_destroy(&ctrl->cels); } static void nvme_free_ctrl(struct device *dev) { struct nvme_ctrl *ctrl = container_of(dev, struct nvme_ctrl, ctrl_device); struct nvme_subsystem *subsys = ctrl->subsys; if (!subsys || ctrl->instance != subsys->instance) ida_free(&nvme_instance_ida, ctrl->instance); nvme_free_cels(ctrl); nvme_mpath_uninit(ctrl); nvme_auth_stop(ctrl); nvme_auth_free(ctrl); __free_page(ctrl->discard_page); free_opal_dev(ctrl->opal_dev); if (subsys) { mutex_lock(&nvme_subsystems_lock); list_del(&ctrl->subsys_entry); sysfs_remove_link(&subsys->dev.kobj, dev_name(ctrl->device)); mutex_unlock(&nvme_subsystems_lock); } ctrl->ops->free_ctrl(ctrl); if (subsys) nvme_put_subsystem(subsys); } /* * Initialize an NVMe controller structure. This needs to be called during * earliest initialization so that we have the initialized structures around * during probing.
*/ int nvme_init_ctrl(struct nvme_ctrl *ctrl, struct device *dev, const struct nvme_ctrl_ops *ops, unsigned long quirks) { int ret; ctrl->state = NVME_CTRL_NEW; clear_bit(NVME_CTRL_FAILFAST_EXPIRED, &ctrl->flags); spin_lock_init(&ctrl->lock); mutex_init(&ctrl->scan_lock); INIT_LIST_HEAD(&ctrl->namespaces); xa_init(&ctrl->cels); init_rwsem(&ctrl->namespaces_rwsem); ctrl->dev = dev; ctrl->ops = ops; ctrl->quirks = quirks; ctrl->numa_node = NUMA_NO_NODE; INIT_WORK(&ctrl->scan_work, nvme_scan_work); INIT_WORK(&ctrl->async_event_work, nvme_async_event_work); INIT_WORK(&ctrl->fw_act_work, nvme_fw_act_work); INIT_WORK(&ctrl->delete_work, nvme_delete_ctrl_work); init_waitqueue_head(&ctrl->state_wq); INIT_DELAYED_WORK(&ctrl->ka_work, nvme_keep_alive_work); INIT_DELAYED_WORK(&ctrl->failfast_work, nvme_failfast_work); memset(&ctrl->ka_cmd, 0, sizeof(ctrl->ka_cmd)); ctrl->ka_cmd.common.opcode = nvme_admin_keep_alive; BUILD_BUG_ON(NVME_DSM_MAX_RANGES * sizeof(struct nvme_dsm_range) > PAGE_SIZE); ctrl->discard_page = alloc_page(GFP_KERNEL); if (!ctrl->discard_page) { ret = -ENOMEM; goto out; } ret = ida_alloc(&nvme_instance_ida, GFP_KERNEL); if (ret < 0) goto out; ctrl->instance = ret; device_initialize(&ctrl->ctrl_device); ctrl->device = &ctrl->ctrl_device; ctrl->device->devt = MKDEV(MAJOR(nvme_ctrl_base_chr_devt), ctrl->instance); ctrl->device->class = nvme_class; ctrl->device->parent = ctrl->dev; if (ops->dev_attr_groups) ctrl->device->groups = ops->dev_attr_groups; else ctrl->device->groups = nvme_dev_attr_groups; ctrl->device->release = nvme_free_ctrl; dev_set_drvdata(ctrl->device, ctrl); ret = dev_set_name(ctrl->device, "nvme%d", ctrl->instance); if (ret) goto out_release_instance; nvme_get_ctrl(ctrl); cdev_init(&ctrl->cdev, &nvme_dev_fops); ctrl->cdev.owner = ops->module; ret = cdev_device_add(&ctrl->cdev, ctrl->device); if (ret) goto out_free_name; /* * Initialize latency tolerance controls. The sysfs files won't * be visible to userspace unless the device actually supports APST. 
*/ ctrl->device->power.set_latency_tolerance = nvme_set_latency_tolerance; dev_pm_qos_update_user_latency_tolerance(ctrl->device, min(default_ps_max_latency_us, (unsigned long)S32_MAX)); nvme_fault_inject_init(&ctrl->fault_inject, dev_name(ctrl->device)); nvme_mpath_init_ctrl(ctrl); ret = nvme_auth_init_ctrl(ctrl); if (ret) goto out_free_cdev; return 0; out_free_cdev: nvme_fault_inject_fini(&ctrl->fault_inject); dev_pm_qos_hide_latency_tolerance(ctrl->device); cdev_device_del(&ctrl->cdev, ctrl->device); out_free_name: nvme_put_ctrl(ctrl); kfree_const(ctrl->device->kobj.name); out_release_instance: ida_free(&nvme_instance_ida, ctrl->instance); out: if (ctrl->discard_page) __free_page(ctrl->discard_page); return ret; } EXPORT_SYMBOL_GPL(nvme_init_ctrl); /* let I/O to all namespaces fail in preparation for surprise removal */ void nvme_mark_namespaces_dead(struct nvme_ctrl *ctrl) { struct nvme_ns *ns; down_read(&ctrl->namespaces_rwsem); list_for_each_entry(ns, &ctrl->namespaces, list) blk_mark_disk_dead(ns->disk); up_read(&ctrl->namespaces_rwsem); } EXPORT_SYMBOL_GPL(nvme_mark_namespaces_dead); void nvme_unfreeze(struct nvme_ctrl *ctrl) { struct nvme_ns *ns; down_read(&ctrl->namespaces_rwsem); list_for_each_entry(ns, &ctrl->namespaces, list) blk_mq_unfreeze_queue(ns->queue); up_read(&ctrl->namespaces_rwsem); } EXPORT_SYMBOL_GPL(nvme_unfreeze); int nvme_wait_freeze_timeout(struct nvme_ctrl *ctrl, long timeout) { struct nvme_ns *ns; down_read(&ctrl->namespaces_rwsem); list_for_each_entry(ns, &ctrl->namespaces, list) { timeout = blk_mq_freeze_queue_wait_timeout(ns->queue, timeout); if (timeout <= 0) break; } up_read(&ctrl->namespaces_rwsem); return timeout; } EXPORT_SYMBOL_GPL(nvme_wait_freeze_timeout); void nvme_wait_freeze(struct nvme_ctrl *ctrl) { struct nvme_ns *ns; down_read(&ctrl->namespaces_rwsem); list_for_each_entry(ns, &ctrl->namespaces, list) blk_mq_freeze_queue_wait(ns->queue); up_read(&ctrl->namespaces_rwsem); } EXPORT_SYMBOL_GPL(nvme_wait_freeze); void nvme_start_freeze(struct nvme_ctrl *ctrl) { struct nvme_ns *ns; down_read(&ctrl->namespaces_rwsem); list_for_each_entry(ns, &ctrl->namespaces, list) blk_freeze_queue_start(ns->queue); up_read(&ctrl->namespaces_rwsem); } EXPORT_SYMBOL_GPL(nvme_start_freeze); void nvme_quiesce_io_queues(struct nvme_ctrl *ctrl) { if (!ctrl->tagset) return; if (!test_and_set_bit(NVME_CTRL_STOPPED, &ctrl->flags)) blk_mq_quiesce_tagset(ctrl->tagset); else blk_mq_wait_quiesce_done(ctrl->tagset); } EXPORT_SYMBOL_GPL(nvme_quiesce_io_queues); void nvme_unquiesce_io_queues(struct nvme_ctrl *ctrl) { if (!ctrl->tagset) return; if (test_and_clear_bit(NVME_CTRL_STOPPED, &ctrl->flags)) blk_mq_unquiesce_tagset(ctrl->tagset); } EXPORT_SYMBOL_GPL(nvme_unquiesce_io_queues); void nvme_quiesce_admin_queue(struct nvme_ctrl *ctrl) { if (!test_and_set_bit(NVME_CTRL_ADMIN_Q_STOPPED, &ctrl->flags)) blk_mq_quiesce_queue(ctrl->admin_q); else blk_mq_wait_quiesce_done(ctrl->admin_q->tag_set); } EXPORT_SYMBOL_GPL(nvme_quiesce_admin_queue); void nvme_unquiesce_admin_queue(struct nvme_ctrl *ctrl) { if (test_and_clear_bit(NVME_CTRL_ADMIN_Q_STOPPED, &ctrl->flags)) blk_mq_unquiesce_queue(ctrl->admin_q); } EXPORT_SYMBOL_GPL(nvme_unquiesce_admin_queue); void nvme_sync_io_queues(struct nvme_ctrl *ctrl) { struct nvme_ns *ns; down_read(&ctrl->namespaces_rwsem); list_for_each_entry(ns, &ctrl->namespaces, list) blk_sync_queue(ns->queue); up_read(&ctrl->namespaces_rwsem); } EXPORT_SYMBOL_GPL(nvme_sync_io_queues); void nvme_sync_queues(struct nvme_ctrl *ctrl) { nvme_sync_io_queues(ctrl); if 
(ctrl->admin_q) blk_sync_queue(ctrl->admin_q); } EXPORT_SYMBOL_GPL(nvme_sync_queues); struct nvme_ctrl *nvme_ctrl_from_file(struct file *file) { if (file->f_op != &nvme_dev_fops) return NULL; return file->private_data; } EXPORT_SYMBOL_NS_GPL(nvme_ctrl_from_file, NVME_TARGET_PASSTHRU); /* * Check we didn't inadvertently grow the command structure sizes: */ static inline void _nvme_check_size(void) { BUILD_BUG_ON(sizeof(struct nvme_common_command) != 64); BUILD_BUG_ON(sizeof(struct nvme_rw_command) != 64); BUILD_BUG_ON(sizeof(struct nvme_identify) != 64); BUILD_BUG_ON(sizeof(struct nvme_features) != 64); BUILD_BUG_ON(sizeof(struct nvme_download_firmware) != 64); BUILD_BUG_ON(sizeof(struct nvme_format_cmd) != 64); BUILD_BUG_ON(sizeof(struct nvme_dsm_cmd) != 64); BUILD_BUG_ON(sizeof(struct nvme_write_zeroes_cmd) != 64); BUILD_BUG_ON(sizeof(struct nvme_abort_cmd) != 64); BUILD_BUG_ON(sizeof(struct nvme_get_log_page_command) != 64); BUILD_BUG_ON(sizeof(struct nvme_command) != 64); BUILD_BUG_ON(sizeof(struct nvme_id_ctrl) != NVME_IDENTIFY_DATA_SIZE); BUILD_BUG_ON(sizeof(struct nvme_id_ns) != NVME_IDENTIFY_DATA_SIZE); BUILD_BUG_ON(sizeof(struct nvme_id_ns_cs_indep) != NVME_IDENTIFY_DATA_SIZE); BUILD_BUG_ON(sizeof(struct nvme_id_ns_zns) != NVME_IDENTIFY_DATA_SIZE); BUILD_BUG_ON(sizeof(struct nvme_id_ns_nvm) != NVME_IDENTIFY_DATA_SIZE); BUILD_BUG_ON(sizeof(struct nvme_id_ctrl_zns) != NVME_IDENTIFY_DATA_SIZE); BUILD_BUG_ON(sizeof(struct nvme_id_ctrl_nvm) != NVME_IDENTIFY_DATA_SIZE); BUILD_BUG_ON(sizeof(struct nvme_lba_range_type) != 64); BUILD_BUG_ON(sizeof(struct nvme_smart_log) != 512); BUILD_BUG_ON(sizeof(struct nvme_dbbuf) != 64); BUILD_BUG_ON(sizeof(struct nvme_directive_cmd) != 64); BUILD_BUG_ON(sizeof(struct nvme_feat_host_behavior) != 512); } static int __init nvme_core_init(void) { int result = -ENOMEM; _nvme_check_size(); nvme_wq = alloc_workqueue("nvme-wq", WQ_UNBOUND | WQ_MEM_RECLAIM | WQ_SYSFS, 0); if (!nvme_wq) goto out; nvme_reset_wq = alloc_workqueue("nvme-reset-wq", WQ_UNBOUND | WQ_MEM_RECLAIM | WQ_SYSFS, 0); if (!nvme_reset_wq) goto destroy_wq; nvme_delete_wq = alloc_workqueue("nvme-delete-wq", WQ_UNBOUND | WQ_MEM_RECLAIM | WQ_SYSFS, 0); if (!nvme_delete_wq) goto destroy_reset_wq; result = alloc_chrdev_region(&nvme_ctrl_base_chr_devt, 0, NVME_MINORS, "nvme"); if (result < 0) goto destroy_delete_wq; nvme_class = class_create("nvme"); if (IS_ERR(nvme_class)) { result = PTR_ERR(nvme_class); goto unregister_chrdev; } nvme_class->dev_uevent = nvme_class_uevent; nvme_subsys_class = class_create("nvme-subsystem"); if (IS_ERR(nvme_subsys_class)) { result = PTR_ERR(nvme_subsys_class); goto destroy_class; } result = alloc_chrdev_region(&nvme_ns_chr_devt, 0, NVME_MINORS, "nvme-generic"); if (result < 0) goto destroy_subsys_class; nvme_ns_chr_class = class_create("nvme-generic"); if (IS_ERR(nvme_ns_chr_class)) { result = PTR_ERR(nvme_ns_chr_class); goto unregister_generic_ns; } result = nvme_init_auth(); if (result) goto destroy_ns_chr; return 0; destroy_ns_chr: class_destroy(nvme_ns_chr_class); unregister_generic_ns: unregister_chrdev_region(nvme_ns_chr_devt, NVME_MINORS); destroy_subsys_class: class_destroy(nvme_subsys_class); destroy_class: class_destroy(nvme_class); unregister_chrdev: unregister_chrdev_region(nvme_ctrl_base_chr_devt, NVME_MINORS); destroy_delete_wq: destroy_workqueue(nvme_delete_wq); destroy_reset_wq: destroy_workqueue(nvme_reset_wq); destroy_wq: destroy_workqueue(nvme_wq); out: return result; } static void __exit nvme_core_exit(void) { nvme_exit_auth(); 
class_destroy(nvme_ns_chr_class); class_destroy(nvme_subsys_class); class_destroy(nvme_class); unregister_chrdev_region(nvme_ns_chr_devt, NVME_MINORS); unregister_chrdev_region(nvme_ctrl_base_chr_devt, NVME_MINORS); destroy_workqueue(nvme_delete_wq); destroy_workqueue(nvme_reset_wq); destroy_workqueue(nvme_wq); ida_destroy(&nvme_ns_chr_minor_ida); ida_destroy(&nvme_instance_ida); } MODULE_LICENSE("GPL"); MODULE_VERSION("1.0"); module_init(nvme_core_init); module_exit(nvme_core_exit);
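/*
 * Illustrative sketch, not part of the upstream file: it reproduces, as a
 * small standalone program, the APST table-entry encoding used by
 * nvme_configure_apst() above -- the target non-operational power state goes
 * into bits 07:03 (ITPS) and the idle time in milliseconds into bits 31:08
 * (ITPT), with the idle time derived from the total entry+exit latency. The
 * power state number and latencies below are made-up example values; the
 * kernel additionally converts the result with cpu_to_le64() before handing
 * the table to the device.
 */
#include <stdint.h>
#include <stdio.h>

static uint64_t apst_entry(unsigned int state, uint64_t total_latency_us)
{
	/* idle time (ms) = total round-trip latency (us) / 20, rounded up */
	uint64_t itpt_ms = (total_latency_us + 19) / 20;

	if (itpt_ms > (1 << 24) - 1)	/* ITPT is a 24-bit field */
		itpt_ms = (1 << 24) - 1;

	return ((uint64_t)state << 3) | (itpt_ms << 8);
}

int main(void)
{
	/* hypothetical PS4 with 10000us entry and 50000us exit latency */
	uint64_t entry = apst_entry(4, 10000 + 50000);

	printf("entry=0x%016llx itps=%llu itpt=%llums\n",
	       (unsigned long long)entry,
	       (unsigned long long)((entry >> 3) & 0x1f),
	       (unsigned long long)(entry >> 8));
	return 0;
}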
linux-master
drivers/nvme/host/core.c
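/*
 * Illustrative userspace sketch, not part of the kernel sources: it issues an
 * Identify Controller admin command through the /dev/nvmeX character device
 * registered by nvme_init_ctrl()/nvme_dev_fops in core.c above, exercising
 * the passthrough path implemented in drivers/nvme/host/ioctl.c below. The
 * device path /dev/nvme0 is an assumption; adjust it to an existing
 * controller, and note that the device node is normally only accessible to
 * root or to users granted permission on it.
 */
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/nvme_ioctl.h>

int main(void)
{
	struct nvme_passthru_cmd64 cmd;
	unsigned char id[4096];
	int fd, ret;

	fd = open("/dev/nvme0", O_RDONLY);
	if (fd < 0) {
		perror("open /dev/nvme0");
		return 1;
	}

	memset(&cmd, 0, sizeof(cmd));
	cmd.opcode = 0x06;	/* Identify */
	cmd.cdw10 = 1;		/* CNS 01h: Identify Controller data structure */
	cmd.addr = (uintptr_t)id;
	cmd.data_len = sizeof(id);

	ret = ioctl(fd, NVME_IOCTL_ADMIN64_CMD, &cmd);
	if (ret < 0)
		perror("NVME_IOCTL_ADMIN64_CMD");
	else
		/* bytes 24..63 of the Identify data hold the model number */
		printf("status=%d model=%.40s\n", ret, id + 24);

	close(fd);
	return 0;
}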
// SPDX-License-Identifier: GPL-2.0 /* * Copyright (c) 2011-2014, Intel Corporation. * Copyright (c) 2017-2021 Christoph Hellwig. */ #include <linux/ptrace.h> /* for force_successful_syscall_return */ #include <linux/nvme_ioctl.h> #include <linux/io_uring.h> #include "nvme.h" enum { NVME_IOCTL_VEC = (1 << 0), NVME_IOCTL_PARTITION = (1 << 1), }; static bool nvme_cmd_allowed(struct nvme_ns *ns, struct nvme_command *c, unsigned int flags, bool open_for_write) { u32 effects; if (capable(CAP_SYS_ADMIN)) return true; /* * Do not allow unprivileged passthrough on partitions, as that allows an * escape from the containment of the partition. */ if (flags & NVME_IOCTL_PARTITION) return false; /* * Do not allow unprivileged processes to send vendor specific or fabrics * commands as we can't be sure about their effects. */ if (c->common.opcode >= nvme_cmd_vendor_start || c->common.opcode == nvme_fabrics_command) return false; /* * Do not allow unprivileged passthrough of admin commands except * for a subset of identify commands that contain information required * to form proper I/O commands in userspace and do not expose any * potentially sensitive information. */ if (!ns) { if (c->common.opcode == nvme_admin_identify) { switch (c->identify.cns) { case NVME_ID_CNS_NS: case NVME_ID_CNS_CS_NS: case NVME_ID_CNS_NS_CS_INDEP: case NVME_ID_CNS_CS_CTRL: case NVME_ID_CNS_CTRL: return true; } } return false; } /* * Check if the controller provides a Commands Supported and Effects log * and marks this command as supported. If not reject unprivileged * passthrough. */ effects = nvme_command_effects(ns->ctrl, ns, c->common.opcode); if (!(effects & NVME_CMD_EFFECTS_CSUPP)) return false; /* * Don't allow passthrough for command that have intrusive (or unknown) * effects. */ if (effects & ~(NVME_CMD_EFFECTS_CSUPP | NVME_CMD_EFFECTS_LBCC | NVME_CMD_EFFECTS_UUID_SEL | NVME_CMD_EFFECTS_SCOPE_MASK)) return false; /* * Only allow I/O commands that transfer data to the controller or that * change the logical block contents if the file descriptor is open for * writing. */ if (nvme_is_write(c) || (effects & NVME_CMD_EFFECTS_LBCC)) return open_for_write; return true; } /* * Convert integer values from ioctl structures to user pointers, silently * ignoring the upper bits in the compat case to match behaviour of 32-bit * kernels. 
*/ static void __user *nvme_to_user_ptr(uintptr_t ptrval) { if (in_compat_syscall()) ptrval = (compat_uptr_t)ptrval; return (void __user *)ptrval; } static void *nvme_add_user_metadata(struct request *req, void __user *ubuf, unsigned len, u32 seed) { struct bio_integrity_payload *bip; int ret = -ENOMEM; void *buf; struct bio *bio = req->bio; buf = kmalloc(len, GFP_KERNEL); if (!buf) goto out; ret = -EFAULT; if ((req_op(req) == REQ_OP_DRV_OUT) && copy_from_user(buf, ubuf, len)) goto out_free_meta; bip = bio_integrity_alloc(bio, GFP_KERNEL, 1); if (IS_ERR(bip)) { ret = PTR_ERR(bip); goto out_free_meta; } bip->bip_iter.bi_sector = seed; ret = bio_integrity_add_page(bio, virt_to_page(buf), len, offset_in_page(buf)); if (ret != len) { ret = -ENOMEM; goto out_free_meta; } req->cmd_flags |= REQ_INTEGRITY; return buf; out_free_meta: kfree(buf); out: return ERR_PTR(ret); } static int nvme_finish_user_metadata(struct request *req, void __user *ubuf, void *meta, unsigned len, int ret) { if (!ret && req_op(req) == REQ_OP_DRV_IN && copy_to_user(ubuf, meta, len)) ret = -EFAULT; kfree(meta); return ret; } static struct request *nvme_alloc_user_request(struct request_queue *q, struct nvme_command *cmd, blk_opf_t rq_flags, blk_mq_req_flags_t blk_flags) { struct request *req; req = blk_mq_alloc_request(q, nvme_req_op(cmd) | rq_flags, blk_flags); if (IS_ERR(req)) return req; nvme_init_request(req, cmd); nvme_req(req)->flags |= NVME_REQ_USERCMD; return req; } static int nvme_map_user_request(struct request *req, u64 ubuffer, unsigned bufflen, void __user *meta_buffer, unsigned meta_len, u32 meta_seed, void **metap, struct io_uring_cmd *ioucmd, unsigned int flags) { struct request_queue *q = req->q; struct nvme_ns *ns = q->queuedata; struct block_device *bdev = ns ? ns->disk->part0 : NULL; struct bio *bio = NULL; void *meta = NULL; int ret; if (ioucmd && (ioucmd->flags & IORING_URING_CMD_FIXED)) { struct iov_iter iter; /* fixedbufs is only for non-vectored io */ if (WARN_ON_ONCE(flags & NVME_IOCTL_VEC)) return -EINVAL; ret = io_uring_cmd_import_fixed(ubuffer, bufflen, rq_data_dir(req), &iter, ioucmd); if (ret < 0) goto out; ret = blk_rq_map_user_iov(q, req, NULL, &iter, GFP_KERNEL); } else { ret = blk_rq_map_user_io(req, NULL, nvme_to_user_ptr(ubuffer), bufflen, GFP_KERNEL, flags & NVME_IOCTL_VEC, 0, 0, rq_data_dir(req)); } if (ret) goto out; bio = req->bio; if (bdev) bio_set_dev(bio, bdev); if (bdev && meta_buffer && meta_len) { meta = nvme_add_user_metadata(req, meta_buffer, meta_len, meta_seed); if (IS_ERR(meta)) { ret = PTR_ERR(meta); goto out_unmap; } *metap = meta; } return ret; out_unmap: if (bio) blk_rq_unmap_user(bio); out: blk_mq_free_request(req); return ret; } static int nvme_submit_user_cmd(struct request_queue *q, struct nvme_command *cmd, u64 ubuffer, unsigned bufflen, void __user *meta_buffer, unsigned meta_len, u32 meta_seed, u64 *result, unsigned timeout, unsigned int flags) { struct nvme_ns *ns = q->queuedata; struct nvme_ctrl *ctrl; struct request *req; void *meta = NULL; struct bio *bio; u32 effects; int ret; req = nvme_alloc_user_request(q, cmd, 0, 0); if (IS_ERR(req)) return PTR_ERR(req); req->timeout = timeout; if (ubuffer && bufflen) { ret = nvme_map_user_request(req, ubuffer, bufflen, meta_buffer, meta_len, meta_seed, &meta, NULL, flags); if (ret) return ret; } bio = req->bio; ctrl = nvme_req(req)->ctrl; effects = nvme_passthru_start(ctrl, ns, cmd->common.opcode); ret = nvme_execute_rq(req, false); if (result) *result = le64_to_cpu(nvme_req(req)->result.u64); if (meta) ret = 
nvme_finish_user_metadata(req, meta_buffer, meta, meta_len, ret); if (bio) blk_rq_unmap_user(bio); blk_mq_free_request(req); if (effects) nvme_passthru_end(ctrl, ns, effects, cmd, ret); return ret; } static int nvme_submit_io(struct nvme_ns *ns, struct nvme_user_io __user *uio) { struct nvme_user_io io; struct nvme_command c; unsigned length, meta_len; void __user *metadata; if (copy_from_user(&io, uio, sizeof(io))) return -EFAULT; if (io.flags) return -EINVAL; switch (io.opcode) { case nvme_cmd_write: case nvme_cmd_read: case nvme_cmd_compare: break; default: return -EINVAL; } length = (io.nblocks + 1) << ns->lba_shift; if ((io.control & NVME_RW_PRINFO_PRACT) && ns->ms == sizeof(struct t10_pi_tuple)) { /* * Protection information is stripped/inserted by the * controller. */ if (nvme_to_user_ptr(io.metadata)) return -EINVAL; meta_len = 0; metadata = NULL; } else { meta_len = (io.nblocks + 1) * ns->ms; metadata = nvme_to_user_ptr(io.metadata); } if (ns->features & NVME_NS_EXT_LBAS) { length += meta_len; meta_len = 0; } else if (meta_len) { if ((io.metadata & 3) || !io.metadata) return -EINVAL; } memset(&c, 0, sizeof(c)); c.rw.opcode = io.opcode; c.rw.flags = io.flags; c.rw.nsid = cpu_to_le32(ns->head->ns_id); c.rw.slba = cpu_to_le64(io.slba); c.rw.length = cpu_to_le16(io.nblocks); c.rw.control = cpu_to_le16(io.control); c.rw.dsmgmt = cpu_to_le32(io.dsmgmt); c.rw.reftag = cpu_to_le32(io.reftag); c.rw.apptag = cpu_to_le16(io.apptag); c.rw.appmask = cpu_to_le16(io.appmask); return nvme_submit_user_cmd(ns->queue, &c, io.addr, length, metadata, meta_len, lower_32_bits(io.slba), NULL, 0, 0); } static bool nvme_validate_passthru_nsid(struct nvme_ctrl *ctrl, struct nvme_ns *ns, __u32 nsid) { if (ns && nsid != ns->head->ns_id) { dev_err(ctrl->device, "%s: nsid (%u) in cmd does not match nsid (%u)" "of namespace\n", current->comm, nsid, ns->head->ns_id); return false; } return true; } static int nvme_user_cmd(struct nvme_ctrl *ctrl, struct nvme_ns *ns, struct nvme_passthru_cmd __user *ucmd, unsigned int flags, bool open_for_write) { struct nvme_passthru_cmd cmd; struct nvme_command c; unsigned timeout = 0; u64 result; int status; if (copy_from_user(&cmd, ucmd, sizeof(cmd))) return -EFAULT; if (cmd.flags) return -EINVAL; if (!nvme_validate_passthru_nsid(ctrl, ns, cmd.nsid)) return -EINVAL; memset(&c, 0, sizeof(c)); c.common.opcode = cmd.opcode; c.common.flags = cmd.flags; c.common.nsid = cpu_to_le32(cmd.nsid); c.common.cdw2[0] = cpu_to_le32(cmd.cdw2); c.common.cdw2[1] = cpu_to_le32(cmd.cdw3); c.common.cdw10 = cpu_to_le32(cmd.cdw10); c.common.cdw11 = cpu_to_le32(cmd.cdw11); c.common.cdw12 = cpu_to_le32(cmd.cdw12); c.common.cdw13 = cpu_to_le32(cmd.cdw13); c.common.cdw14 = cpu_to_le32(cmd.cdw14); c.common.cdw15 = cpu_to_le32(cmd.cdw15); if (!nvme_cmd_allowed(ns, &c, 0, open_for_write)) return -EACCES; if (cmd.timeout_ms) timeout = msecs_to_jiffies(cmd.timeout_ms); status = nvme_submit_user_cmd(ns ? 
ns->queue : ctrl->admin_q, &c, cmd.addr, cmd.data_len, nvme_to_user_ptr(cmd.metadata), cmd.metadata_len, 0, &result, timeout, 0); if (status >= 0) { if (put_user(result, &ucmd->result)) return -EFAULT; } return status; } static int nvme_user_cmd64(struct nvme_ctrl *ctrl, struct nvme_ns *ns, struct nvme_passthru_cmd64 __user *ucmd, unsigned int flags, bool open_for_write) { struct nvme_passthru_cmd64 cmd; struct nvme_command c; unsigned timeout = 0; int status; if (copy_from_user(&cmd, ucmd, sizeof(cmd))) return -EFAULT; if (cmd.flags) return -EINVAL; if (!nvme_validate_passthru_nsid(ctrl, ns, cmd.nsid)) return -EINVAL; memset(&c, 0, sizeof(c)); c.common.opcode = cmd.opcode; c.common.flags = cmd.flags; c.common.nsid = cpu_to_le32(cmd.nsid); c.common.cdw2[0] = cpu_to_le32(cmd.cdw2); c.common.cdw2[1] = cpu_to_le32(cmd.cdw3); c.common.cdw10 = cpu_to_le32(cmd.cdw10); c.common.cdw11 = cpu_to_le32(cmd.cdw11); c.common.cdw12 = cpu_to_le32(cmd.cdw12); c.common.cdw13 = cpu_to_le32(cmd.cdw13); c.common.cdw14 = cpu_to_le32(cmd.cdw14); c.common.cdw15 = cpu_to_le32(cmd.cdw15); if (!nvme_cmd_allowed(ns, &c, flags, open_for_write)) return -EACCES; if (cmd.timeout_ms) timeout = msecs_to_jiffies(cmd.timeout_ms); status = nvme_submit_user_cmd(ns ? ns->queue : ctrl->admin_q, &c, cmd.addr, cmd.data_len, nvme_to_user_ptr(cmd.metadata), cmd.metadata_len, 0, &cmd.result, timeout, flags); if (status >= 0) { if (put_user(cmd.result, &ucmd->result)) return -EFAULT; } return status; } struct nvme_uring_data { __u64 metadata; __u64 addr; __u32 data_len; __u32 metadata_len; __u32 timeout_ms; }; /* * This overlays struct io_uring_cmd pdu. * Expect build errors if this grows larger than that. */ struct nvme_uring_cmd_pdu { union { struct bio *bio; struct request *req; }; u32 meta_len; u32 nvme_status; union { struct { void *meta; /* kernel-resident buffer */ void __user *meta_buffer; }; u64 result; } u; }; static inline struct nvme_uring_cmd_pdu *nvme_uring_cmd_pdu( struct io_uring_cmd *ioucmd) { return (struct nvme_uring_cmd_pdu *)&ioucmd->pdu; } static void nvme_uring_task_meta_cb(struct io_uring_cmd *ioucmd, unsigned issue_flags) { struct nvme_uring_cmd_pdu *pdu = nvme_uring_cmd_pdu(ioucmd); struct request *req = pdu->req; int status; u64 result; if (nvme_req(req)->flags & NVME_REQ_CANCELLED) status = -EINTR; else status = nvme_req(req)->status; result = le64_to_cpu(nvme_req(req)->result.u64); if (pdu->meta_len) status = nvme_finish_user_metadata(req, pdu->u.meta_buffer, pdu->u.meta, pdu->meta_len, status); if (req->bio) blk_rq_unmap_user(req->bio); blk_mq_free_request(req); io_uring_cmd_done(ioucmd, status, result, issue_flags); } static void nvme_uring_task_cb(struct io_uring_cmd *ioucmd, unsigned issue_flags) { struct nvme_uring_cmd_pdu *pdu = nvme_uring_cmd_pdu(ioucmd); if (pdu->bio) blk_rq_unmap_user(pdu->bio); io_uring_cmd_done(ioucmd, pdu->nvme_status, pdu->u.result, issue_flags); } static enum rq_end_io_ret nvme_uring_cmd_end_io(struct request *req, blk_status_t err) { struct io_uring_cmd *ioucmd = req->end_io_data; struct nvme_uring_cmd_pdu *pdu = nvme_uring_cmd_pdu(ioucmd); req->bio = pdu->bio; if (nvme_req(req)->flags & NVME_REQ_CANCELLED) pdu->nvme_status = -EINTR; else pdu->nvme_status = nvme_req(req)->status; pdu->u.result = le64_to_cpu(nvme_req(req)->result.u64); /* * For iopoll, complete it directly. * Otherwise, move the completion to task work. 
*/ if (blk_rq_is_poll(req)) { WRITE_ONCE(ioucmd->cookie, NULL); nvme_uring_task_cb(ioucmd, IO_URING_F_UNLOCKED); } else { io_uring_cmd_do_in_task_lazy(ioucmd, nvme_uring_task_cb); } return RQ_END_IO_FREE; } static enum rq_end_io_ret nvme_uring_cmd_end_io_meta(struct request *req, blk_status_t err) { struct io_uring_cmd *ioucmd = req->end_io_data; struct nvme_uring_cmd_pdu *pdu = nvme_uring_cmd_pdu(ioucmd); req->bio = pdu->bio; pdu->req = req; /* * For iopoll, complete it directly. * Otherwise, move the completion to task work. */ if (blk_rq_is_poll(req)) { WRITE_ONCE(ioucmd->cookie, NULL); nvme_uring_task_meta_cb(ioucmd, IO_URING_F_UNLOCKED); } else { io_uring_cmd_do_in_task_lazy(ioucmd, nvme_uring_task_meta_cb); } return RQ_END_IO_NONE; } static int nvme_uring_cmd_io(struct nvme_ctrl *ctrl, struct nvme_ns *ns, struct io_uring_cmd *ioucmd, unsigned int issue_flags, bool vec) { struct nvme_uring_cmd_pdu *pdu = nvme_uring_cmd_pdu(ioucmd); const struct nvme_uring_cmd *cmd = io_uring_sqe_cmd(ioucmd->sqe); struct request_queue *q = ns ? ns->queue : ctrl->admin_q; struct nvme_uring_data d; struct nvme_command c; struct request *req; blk_opf_t rq_flags = REQ_ALLOC_CACHE; blk_mq_req_flags_t blk_flags = 0; void *meta = NULL; int ret; c.common.opcode = READ_ONCE(cmd->opcode); c.common.flags = READ_ONCE(cmd->flags); if (c.common.flags) return -EINVAL; c.common.command_id = 0; c.common.nsid = cpu_to_le32(cmd->nsid); if (!nvme_validate_passthru_nsid(ctrl, ns, le32_to_cpu(c.common.nsid))) return -EINVAL; c.common.cdw2[0] = cpu_to_le32(READ_ONCE(cmd->cdw2)); c.common.cdw2[1] = cpu_to_le32(READ_ONCE(cmd->cdw3)); c.common.metadata = 0; c.common.dptr.prp1 = c.common.dptr.prp2 = 0; c.common.cdw10 = cpu_to_le32(READ_ONCE(cmd->cdw10)); c.common.cdw11 = cpu_to_le32(READ_ONCE(cmd->cdw11)); c.common.cdw12 = cpu_to_le32(READ_ONCE(cmd->cdw12)); c.common.cdw13 = cpu_to_le32(READ_ONCE(cmd->cdw13)); c.common.cdw14 = cpu_to_le32(READ_ONCE(cmd->cdw14)); c.common.cdw15 = cpu_to_le32(READ_ONCE(cmd->cdw15)); if (!nvme_cmd_allowed(ns, &c, 0, ioucmd->file->f_mode & FMODE_WRITE)) return -EACCES; d.metadata = READ_ONCE(cmd->metadata); d.addr = READ_ONCE(cmd->addr); d.data_len = READ_ONCE(cmd->data_len); d.metadata_len = READ_ONCE(cmd->metadata_len); d.timeout_ms = READ_ONCE(cmd->timeout_ms); if (issue_flags & IO_URING_F_NONBLOCK) { rq_flags |= REQ_NOWAIT; blk_flags = BLK_MQ_REQ_NOWAIT; } if (issue_flags & IO_URING_F_IOPOLL) rq_flags |= REQ_POLLED; req = nvme_alloc_user_request(q, &c, rq_flags, blk_flags); if (IS_ERR(req)) return PTR_ERR(req); req->timeout = d.timeout_ms ? 
msecs_to_jiffies(d.timeout_ms) : 0; if (d.addr && d.data_len) { ret = nvme_map_user_request(req, d.addr, d.data_len, nvme_to_user_ptr(d.metadata), d.metadata_len, 0, &meta, ioucmd, vec); if (ret) return ret; } if (blk_rq_is_poll(req)) { ioucmd->flags |= IORING_URING_CMD_POLLED; WRITE_ONCE(ioucmd->cookie, req); } /* to free bio on completion, as req->bio will be null at that time */ pdu->bio = req->bio; pdu->meta_len = d.metadata_len; req->end_io_data = ioucmd; if (pdu->meta_len) { pdu->u.meta = meta; pdu->u.meta_buffer = nvme_to_user_ptr(d.metadata); req->end_io = nvme_uring_cmd_end_io_meta; } else { req->end_io = nvme_uring_cmd_end_io; } blk_execute_rq_nowait(req, false); return -EIOCBQUEUED; } static bool is_ctrl_ioctl(unsigned int cmd) { if (cmd == NVME_IOCTL_ADMIN_CMD || cmd == NVME_IOCTL_ADMIN64_CMD) return true; if (is_sed_ioctl(cmd)) return true; return false; } static int nvme_ctrl_ioctl(struct nvme_ctrl *ctrl, unsigned int cmd, void __user *argp, bool open_for_write) { switch (cmd) { case NVME_IOCTL_ADMIN_CMD: return nvme_user_cmd(ctrl, NULL, argp, 0, open_for_write); case NVME_IOCTL_ADMIN64_CMD: return nvme_user_cmd64(ctrl, NULL, argp, 0, open_for_write); default: return sed_ioctl(ctrl->opal_dev, cmd, argp); } } #ifdef COMPAT_FOR_U64_ALIGNMENT struct nvme_user_io32 { __u8 opcode; __u8 flags; __u16 control; __u16 nblocks; __u16 rsvd; __u64 metadata; __u64 addr; __u64 slba; __u32 dsmgmt; __u32 reftag; __u16 apptag; __u16 appmask; } __attribute__((__packed__)); #define NVME_IOCTL_SUBMIT_IO32 _IOW('N', 0x42, struct nvme_user_io32) #endif /* COMPAT_FOR_U64_ALIGNMENT */ static int nvme_ns_ioctl(struct nvme_ns *ns, unsigned int cmd, void __user *argp, unsigned int flags, bool open_for_write) { switch (cmd) { case NVME_IOCTL_ID: force_successful_syscall_return(); return ns->head->ns_id; case NVME_IOCTL_IO_CMD: return nvme_user_cmd(ns->ctrl, ns, argp, flags, open_for_write); /* * struct nvme_user_io can have different padding on some 32-bit ABIs. * Just accept the compat version as all fields that are used are the * same size and at the same offset. 
*/ #ifdef COMPAT_FOR_U64_ALIGNMENT case NVME_IOCTL_SUBMIT_IO32: #endif case NVME_IOCTL_SUBMIT_IO: return nvme_submit_io(ns, argp); case NVME_IOCTL_IO64_CMD_VEC: flags |= NVME_IOCTL_VEC; fallthrough; case NVME_IOCTL_IO64_CMD: return nvme_user_cmd64(ns->ctrl, ns, argp, flags, open_for_write); default: return -ENOTTY; } } int nvme_ioctl(struct block_device *bdev, blk_mode_t mode, unsigned int cmd, unsigned long arg) { struct nvme_ns *ns = bdev->bd_disk->private_data; bool open_for_write = mode & BLK_OPEN_WRITE; void __user *argp = (void __user *)arg; unsigned int flags = 0; if (bdev_is_partition(bdev)) flags |= NVME_IOCTL_PARTITION; if (is_ctrl_ioctl(cmd)) return nvme_ctrl_ioctl(ns->ctrl, cmd, argp, open_for_write); return nvme_ns_ioctl(ns, cmd, argp, flags, open_for_write); } long nvme_ns_chr_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { struct nvme_ns *ns = container_of(file_inode(file)->i_cdev, struct nvme_ns, cdev); bool open_for_write = file->f_mode & FMODE_WRITE; void __user *argp = (void __user *)arg; if (is_ctrl_ioctl(cmd)) return nvme_ctrl_ioctl(ns->ctrl, cmd, argp, open_for_write); return nvme_ns_ioctl(ns, cmd, argp, 0, open_for_write); } static int nvme_uring_cmd_checks(unsigned int issue_flags) { /* NVMe passthrough requires big SQE/CQE support */ if ((issue_flags & (IO_URING_F_SQE128|IO_URING_F_CQE32)) != (IO_URING_F_SQE128|IO_URING_F_CQE32)) return -EOPNOTSUPP; return 0; } static int nvme_ns_uring_cmd(struct nvme_ns *ns, struct io_uring_cmd *ioucmd, unsigned int issue_flags) { struct nvme_ctrl *ctrl = ns->ctrl; int ret; BUILD_BUG_ON(sizeof(struct nvme_uring_cmd_pdu) > sizeof(ioucmd->pdu)); ret = nvme_uring_cmd_checks(issue_flags); if (ret) return ret; switch (ioucmd->cmd_op) { case NVME_URING_CMD_IO: ret = nvme_uring_cmd_io(ctrl, ns, ioucmd, issue_flags, false); break; case NVME_URING_CMD_IO_VEC: ret = nvme_uring_cmd_io(ctrl, ns, ioucmd, issue_flags, true); break; default: ret = -ENOTTY; } return ret; } int nvme_ns_chr_uring_cmd(struct io_uring_cmd *ioucmd, unsigned int issue_flags) { struct nvme_ns *ns = container_of(file_inode(ioucmd->file)->i_cdev, struct nvme_ns, cdev); return nvme_ns_uring_cmd(ns, ioucmd, issue_flags); } int nvme_ns_chr_uring_cmd_iopoll(struct io_uring_cmd *ioucmd, struct io_comp_batch *iob, unsigned int poll_flags) { struct request *req; int ret = 0; if (!(ioucmd->flags & IORING_URING_CMD_POLLED)) return 0; req = READ_ONCE(ioucmd->cookie); if (req && blk_rq_is_poll(req)) ret = blk_rq_poll(req, iob, poll_flags); return ret; } #ifdef CONFIG_NVME_MULTIPATH static int nvme_ns_head_ctrl_ioctl(struct nvme_ns *ns, unsigned int cmd, void __user *argp, struct nvme_ns_head *head, int srcu_idx, bool open_for_write) __releases(&head->srcu) { struct nvme_ctrl *ctrl = ns->ctrl; int ret; nvme_get_ctrl(ns->ctrl); srcu_read_unlock(&head->srcu, srcu_idx); ret = nvme_ctrl_ioctl(ns->ctrl, cmd, argp, open_for_write); nvme_put_ctrl(ctrl); return ret; } int nvme_ns_head_ioctl(struct block_device *bdev, blk_mode_t mode, unsigned int cmd, unsigned long arg) { struct nvme_ns_head *head = bdev->bd_disk->private_data; bool open_for_write = mode & BLK_OPEN_WRITE; void __user *argp = (void __user *)arg; struct nvme_ns *ns; int srcu_idx, ret = -EWOULDBLOCK; unsigned int flags = 0; if (bdev_is_partition(bdev)) flags |= NVME_IOCTL_PARTITION; srcu_idx = srcu_read_lock(&head->srcu); ns = nvme_find_path(head); if (!ns) goto out_unlock; /* * Handle ioctls that apply to the controller instead of the namespace * separately and drop the ns SRCU reference early.
This avoids a * deadlock when deleting namespaces using the passthrough interface. */ if (is_ctrl_ioctl(cmd)) return nvme_ns_head_ctrl_ioctl(ns, cmd, argp, head, srcu_idx, open_for_write); ret = nvme_ns_ioctl(ns, cmd, argp, flags, open_for_write); out_unlock: srcu_read_unlock(&head->srcu, srcu_idx); return ret; } long nvme_ns_head_chr_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { bool open_for_write = file->f_mode & FMODE_WRITE; struct cdev *cdev = file_inode(file)->i_cdev; struct nvme_ns_head *head = container_of(cdev, struct nvme_ns_head, cdev); void __user *argp = (void __user *)arg; struct nvme_ns *ns; int srcu_idx, ret = -EWOULDBLOCK; srcu_idx = srcu_read_lock(&head->srcu); ns = nvme_find_path(head); if (!ns) goto out_unlock; if (is_ctrl_ioctl(cmd)) return nvme_ns_head_ctrl_ioctl(ns, cmd, argp, head, srcu_idx, open_for_write); ret = nvme_ns_ioctl(ns, cmd, argp, 0, open_for_write); out_unlock: srcu_read_unlock(&head->srcu, srcu_idx); return ret; } int nvme_ns_head_chr_uring_cmd(struct io_uring_cmd *ioucmd, unsigned int issue_flags) { struct cdev *cdev = file_inode(ioucmd->file)->i_cdev; struct nvme_ns_head *head = container_of(cdev, struct nvme_ns_head, cdev); int srcu_idx = srcu_read_lock(&head->srcu); struct nvme_ns *ns = nvme_find_path(head); int ret = -EINVAL; if (ns) ret = nvme_ns_uring_cmd(ns, ioucmd, issue_flags); srcu_read_unlock(&head->srcu, srcu_idx); return ret; } #endif /* CONFIG_NVME_MULTIPATH */ int nvme_dev_uring_cmd(struct io_uring_cmd *ioucmd, unsigned int issue_flags) { struct nvme_ctrl *ctrl = ioucmd->file->private_data; int ret; /* IOPOLL not supported yet */ if (issue_flags & IO_URING_F_IOPOLL) return -EOPNOTSUPP; ret = nvme_uring_cmd_checks(issue_flags); if (ret) return ret; switch (ioucmd->cmd_op) { case NVME_URING_CMD_ADMIN: ret = nvme_uring_cmd_io(ctrl, NULL, ioucmd, issue_flags, false); break; case NVME_URING_CMD_ADMIN_VEC: ret = nvme_uring_cmd_io(ctrl, NULL, ioucmd, issue_flags, true); break; default: ret = -ENOTTY; } return ret; } static int nvme_dev_user_cmd(struct nvme_ctrl *ctrl, void __user *argp, bool open_for_write) { struct nvme_ns *ns; int ret; down_read(&ctrl->namespaces_rwsem); if (list_empty(&ctrl->namespaces)) { ret = -ENOTTY; goto out_unlock; } ns = list_first_entry(&ctrl->namespaces, struct nvme_ns, list); if (ns != list_last_entry(&ctrl->namespaces, struct nvme_ns, list)) { dev_warn(ctrl->device, "NVME_IOCTL_IO_CMD not supported when multiple namespaces present!\n"); ret = -EINVAL; goto out_unlock; } dev_warn(ctrl->device, "using deprecated NVME_IOCTL_IO_CMD ioctl on the char device!\n"); kref_get(&ns->kref); up_read(&ctrl->namespaces_rwsem); ret = nvme_user_cmd(ctrl, ns, argp, 0, open_for_write); nvme_put_ns(ns); return ret; out_unlock: up_read(&ctrl->namespaces_rwsem); return ret; } long nvme_dev_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { bool open_for_write = file->f_mode & FMODE_WRITE; struct nvme_ctrl *ctrl = file->private_data; void __user *argp = (void __user *)arg; switch (cmd) { case NVME_IOCTL_ADMIN_CMD: return nvme_user_cmd(ctrl, NULL, argp, 0, open_for_write); case NVME_IOCTL_ADMIN64_CMD: return nvme_user_cmd64(ctrl, NULL, argp, 0, open_for_write); case NVME_IOCTL_IO_CMD: return nvme_dev_user_cmd(ctrl, argp, open_for_write); case NVME_IOCTL_RESET: if (!capable(CAP_SYS_ADMIN)) return -EACCES; dev_warn(ctrl->device, "resetting controller\n"); return nvme_reset_ctrl_sync(ctrl); case NVME_IOCTL_SUBSYS_RESET: if (!capable(CAP_SYS_ADMIN)) return -EACCES; return nvme_reset_subsystem(ctrl); case 
NVME_IOCTL_RESCAN: if (!capable(CAP_SYS_ADMIN)) return -EACCES; nvme_queue_scan(ctrl); return 0; default: return -ENOTTY; } }
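/*
 * Editor's note (illustrative sketch only, not part of the upstream file):
 * the admin passthrough path implemented by nvme_user_cmd() above is driven
 * from userspace via NVME_IOCTL_ADMIN_CMD on the controller character
 * device.  A minimal, hypothetical caller issuing Identify Controller into a
 * 4096-byte buffer "buf" through an open fd on /dev/nvme0 looks roughly like:
 *
 *	struct nvme_passthru_cmd cmd = {
 *		.opcode   = 0x06,	// Identify (admin opcode 06h)
 *		.cdw10    = 1,		// CNS 01h: Identify Controller
 *		.addr     = (__u64)(uintptr_t)buf,
 *		.data_len = 4096,
 *	};
 *	if (ioctl(fd, NVME_IOCTL_ADMIN_CMD, &cmd) < 0)
 *		perror("NVME_IOCTL_ADMIN_CMD");
 *
 * Whether an unprivileged caller may issue a given command is decided by
 * nvme_cmd_allowed() above.
 */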
linux-master
drivers/nvme/host/ioctl.c
// SPDX-License-Identifier: GPL-2.0 /* * NVMe over Fabrics TCP host. * Copyright (c) 2018 Lightbits Labs. All rights reserved. */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/module.h> #include <linux/init.h> #include <linux/slab.h> #include <linux/err.h> #include <linux/nvme-tcp.h> #include <net/sock.h> #include <net/tcp.h> #include <linux/blk-mq.h> #include <crypto/hash.h> #include <net/busy_poll.h> #include <trace/events/sock.h> #include "nvme.h" #include "fabrics.h" struct nvme_tcp_queue; /* Define the socket priority to use for connections where it is desirable * that the NIC consider performing optimized packet processing or filtering. * A non-zero value is sufficient to indicate general consideration of any * possible optimization. Making it a module param allows for alternative * values that may be unique for some NIC implementations. */ static int so_priority; module_param(so_priority, int, 0644); MODULE_PARM_DESC(so_priority, "nvme tcp socket optimize priority"); #ifdef CONFIG_DEBUG_LOCK_ALLOC /* lockdep can detect a circular dependency of the form * sk_lock -> mmap_lock (page fault) -> fs locks -> sk_lock * because dependencies are tracked for both nvme-tcp and user contexts. Using * a separate class prevents lockdep from conflating nvme-tcp socket use with * user-space socket API use. */ static struct lock_class_key nvme_tcp_sk_key[2]; static struct lock_class_key nvme_tcp_slock_key[2]; static void nvme_tcp_reclassify_socket(struct socket *sock) { struct sock *sk = sock->sk; if (WARN_ON_ONCE(!sock_allow_reclassification(sk))) return; switch (sk->sk_family) { case AF_INET: sock_lock_init_class_and_name(sk, "slock-AF_INET-NVME", &nvme_tcp_slock_key[0], "sk_lock-AF_INET-NVME", &nvme_tcp_sk_key[0]); break; case AF_INET6: sock_lock_init_class_and_name(sk, "slock-AF_INET6-NVME", &nvme_tcp_slock_key[1], "sk_lock-AF_INET6-NVME", &nvme_tcp_sk_key[1]); break; default: WARN_ON_ONCE(1); } } #else static void nvme_tcp_reclassify_socket(struct socket *sock) { } #endif enum nvme_tcp_send_state { NVME_TCP_SEND_CMD_PDU = 0, NVME_TCP_SEND_H2C_PDU, NVME_TCP_SEND_DATA, NVME_TCP_SEND_DDGST, }; struct nvme_tcp_request { struct nvme_request req; void *pdu; struct nvme_tcp_queue *queue; u32 data_len; u32 pdu_len; u32 pdu_sent; u32 h2cdata_left; u32 h2cdata_offset; u16 ttag; __le16 status; struct list_head entry; struct llist_node lentry; __le32 ddgst; struct bio *curr_bio; struct iov_iter iter; /* send state */ size_t offset; size_t data_sent; enum nvme_tcp_send_state state; }; enum nvme_tcp_queue_flags { NVME_TCP_Q_ALLOCATED = 0, NVME_TCP_Q_LIVE = 1, NVME_TCP_Q_POLLING = 2, }; enum nvme_tcp_recv_state { NVME_TCP_RECV_PDU = 0, NVME_TCP_RECV_DATA, NVME_TCP_RECV_DDGST, }; struct nvme_tcp_ctrl; struct nvme_tcp_queue { struct socket *sock; struct work_struct io_work; int io_cpu; struct mutex queue_lock; struct mutex send_mutex; struct llist_head req_list; struct list_head send_list; /* recv state */ void *pdu; int pdu_remaining; int pdu_offset; size_t data_remaining; size_t ddgst_remaining; unsigned int nr_cqe; /* send state */ struct nvme_tcp_request *request; u32 maxh2cdata; size_t cmnd_capsule_len; struct nvme_tcp_ctrl *ctrl; unsigned long flags; bool rd_enabled; bool hdr_digest; bool data_digest; struct ahash_request *rcv_hash; struct ahash_request *snd_hash; __le32 exp_ddgst; __le32 recv_ddgst; struct page_frag_cache pf_cache; void (*state_change)(struct sock *); void (*data_ready)(struct sock *); void (*write_space)(struct sock *); }; struct nvme_tcp_ctrl { /* read only in the hot
path */ struct nvme_tcp_queue *queues; struct blk_mq_tag_set tag_set; /* other member variables */ struct list_head list; struct blk_mq_tag_set admin_tag_set; struct sockaddr_storage addr; struct sockaddr_storage src_addr; struct nvme_ctrl ctrl; struct work_struct err_work; struct delayed_work connect_work; struct nvme_tcp_request async_req; u32 io_queues[HCTX_MAX_TYPES]; }; static LIST_HEAD(nvme_tcp_ctrl_list); static DEFINE_MUTEX(nvme_tcp_ctrl_mutex); static struct workqueue_struct *nvme_tcp_wq; static const struct blk_mq_ops nvme_tcp_mq_ops; static const struct blk_mq_ops nvme_tcp_admin_mq_ops; static int nvme_tcp_try_send(struct nvme_tcp_queue *queue); static inline struct nvme_tcp_ctrl *to_tcp_ctrl(struct nvme_ctrl *ctrl) { return container_of(ctrl, struct nvme_tcp_ctrl, ctrl); } static inline int nvme_tcp_queue_id(struct nvme_tcp_queue *queue) { return queue - queue->ctrl->queues; } static inline struct blk_mq_tags *nvme_tcp_tagset(struct nvme_tcp_queue *queue) { u32 queue_idx = nvme_tcp_queue_id(queue); if (queue_idx == 0) return queue->ctrl->admin_tag_set.tags[queue_idx]; return queue->ctrl->tag_set.tags[queue_idx - 1]; } static inline u8 nvme_tcp_hdgst_len(struct nvme_tcp_queue *queue) { return queue->hdr_digest ? NVME_TCP_DIGEST_LENGTH : 0; } static inline u8 nvme_tcp_ddgst_len(struct nvme_tcp_queue *queue) { return queue->data_digest ? NVME_TCP_DIGEST_LENGTH : 0; } static inline void *nvme_tcp_req_cmd_pdu(struct nvme_tcp_request *req) { return req->pdu; } static inline void *nvme_tcp_req_data_pdu(struct nvme_tcp_request *req) { /* use the pdu space in the back for the data pdu */ return req->pdu + sizeof(struct nvme_tcp_cmd_pdu) - sizeof(struct nvme_tcp_data_pdu); } static inline size_t nvme_tcp_inline_data_size(struct nvme_tcp_request *req) { if (nvme_is_fabrics(req->req.cmd)) return NVME_TCP_ADMIN_CCSZ; return req->queue->cmnd_capsule_len - sizeof(struct nvme_command); } static inline bool nvme_tcp_async_req(struct nvme_tcp_request *req) { return req == &req->queue->ctrl->async_req; } static inline bool nvme_tcp_has_inline_data(struct nvme_tcp_request *req) { struct request *rq; if (unlikely(nvme_tcp_async_req(req))) return false; /* async events don't have a request */ rq = blk_mq_rq_from_pdu(req); return rq_data_dir(rq) == WRITE && req->data_len && req->data_len <= nvme_tcp_inline_data_size(req); } static inline struct page *nvme_tcp_req_cur_page(struct nvme_tcp_request *req) { return req->iter.bvec->bv_page; } static inline size_t nvme_tcp_req_cur_offset(struct nvme_tcp_request *req) { return req->iter.bvec->bv_offset + req->iter.iov_offset; } static inline size_t nvme_tcp_req_cur_length(struct nvme_tcp_request *req) { return min_t(size_t, iov_iter_single_seg_count(&req->iter), req->pdu_len - req->pdu_sent); } static inline size_t nvme_tcp_pdu_data_left(struct nvme_tcp_request *req) { return rq_data_dir(blk_mq_rq_from_pdu(req)) == WRITE ? 
req->pdu_len - req->pdu_sent : 0; } static inline size_t nvme_tcp_pdu_last_send(struct nvme_tcp_request *req, int len) { return nvme_tcp_pdu_data_left(req) <= len; } static void nvme_tcp_init_iter(struct nvme_tcp_request *req, unsigned int dir) { struct request *rq = blk_mq_rq_from_pdu(req); struct bio_vec *vec; unsigned int size; int nr_bvec; size_t offset; if (rq->rq_flags & RQF_SPECIAL_PAYLOAD) { vec = &rq->special_vec; nr_bvec = 1; size = blk_rq_payload_bytes(rq); offset = 0; } else { struct bio *bio = req->curr_bio; struct bvec_iter bi; struct bio_vec bv; vec = __bvec_iter_bvec(bio->bi_io_vec, bio->bi_iter); nr_bvec = 0; bio_for_each_bvec(bv, bio, bi) { nr_bvec++; } size = bio->bi_iter.bi_size; offset = bio->bi_iter.bi_bvec_done; } iov_iter_bvec(&req->iter, dir, vec, nr_bvec, size); req->iter.iov_offset = offset; } static inline void nvme_tcp_advance_req(struct nvme_tcp_request *req, int len) { req->data_sent += len; req->pdu_sent += len; iov_iter_advance(&req->iter, len); if (!iov_iter_count(&req->iter) && req->data_sent < req->data_len) { req->curr_bio = req->curr_bio->bi_next; nvme_tcp_init_iter(req, ITER_SOURCE); } } static inline void nvme_tcp_send_all(struct nvme_tcp_queue *queue) { int ret; /* drain the send queue as much as we can... */ do { ret = nvme_tcp_try_send(queue); } while (ret > 0); } static inline bool nvme_tcp_queue_more(struct nvme_tcp_queue *queue) { return !list_empty(&queue->send_list) || !llist_empty(&queue->req_list); } static inline void nvme_tcp_queue_request(struct nvme_tcp_request *req, bool sync, bool last) { struct nvme_tcp_queue *queue = req->queue; bool empty; empty = llist_add(&req->lentry, &queue->req_list) && list_empty(&queue->send_list) && !queue->request; /* * if we're the first on the send_list, try to send directly; * otherwise queue io_work. Also, only do that if we are on the * same cpu, so we don't introduce contention.
*/ if (queue->io_cpu == raw_smp_processor_id() && sync && empty && mutex_trylock(&queue->send_mutex)) { nvme_tcp_send_all(queue); mutex_unlock(&queue->send_mutex); } if (last && nvme_tcp_queue_more(queue)) queue_work_on(queue->io_cpu, nvme_tcp_wq, &queue->io_work); } static void nvme_tcp_process_req_list(struct nvme_tcp_queue *queue) { struct nvme_tcp_request *req; struct llist_node *node; for (node = llist_del_all(&queue->req_list); node; node = node->next) { req = llist_entry(node, struct nvme_tcp_request, lentry); list_add(&req->entry, &queue->send_list); } } static inline struct nvme_tcp_request * nvme_tcp_fetch_request(struct nvme_tcp_queue *queue) { struct nvme_tcp_request *req; req = list_first_entry_or_null(&queue->send_list, struct nvme_tcp_request, entry); if (!req) { nvme_tcp_process_req_list(queue); req = list_first_entry_or_null(&queue->send_list, struct nvme_tcp_request, entry); if (unlikely(!req)) return NULL; } list_del(&req->entry); return req; } static inline void nvme_tcp_ddgst_final(struct ahash_request *hash, __le32 *dgst) { ahash_request_set_crypt(hash, NULL, (u8 *)dgst, 0); crypto_ahash_final(hash); } static inline void nvme_tcp_ddgst_update(struct ahash_request *hash, struct page *page, off_t off, size_t len) { struct scatterlist sg; sg_init_table(&sg, 1); sg_set_page(&sg, page, len, off); ahash_request_set_crypt(hash, &sg, NULL, len); crypto_ahash_update(hash); } static inline void nvme_tcp_hdgst(struct ahash_request *hash, void *pdu, size_t len) { struct scatterlist sg; sg_init_one(&sg, pdu, len); ahash_request_set_crypt(hash, &sg, pdu + len, len); crypto_ahash_digest(hash); } static int nvme_tcp_verify_hdgst(struct nvme_tcp_queue *queue, void *pdu, size_t pdu_len) { struct nvme_tcp_hdr *hdr = pdu; __le32 recv_digest; __le32 exp_digest; if (unlikely(!(hdr->flags & NVME_TCP_F_HDGST))) { dev_err(queue->ctrl->ctrl.device, "queue %d: header digest flag is cleared\n", nvme_tcp_queue_id(queue)); return -EPROTO; } recv_digest = *(__le32 *)(pdu + hdr->hlen); nvme_tcp_hdgst(queue->rcv_hash, pdu, pdu_len); exp_digest = *(__le32 *)(pdu + hdr->hlen); if (recv_digest != exp_digest) { dev_err(queue->ctrl->ctrl.device, "header digest error: recv %#x expected %#x\n", le32_to_cpu(recv_digest), le32_to_cpu(exp_digest)); return -EIO; } return 0; } static int nvme_tcp_check_ddgst(struct nvme_tcp_queue *queue, void *pdu) { struct nvme_tcp_hdr *hdr = pdu; u8 digest_len = nvme_tcp_hdgst_len(queue); u32 len; len = le32_to_cpu(hdr->plen) - hdr->hlen - ((hdr->flags & NVME_TCP_F_HDGST) ? digest_len : 0); if (unlikely(len && !(hdr->flags & NVME_TCP_F_DDGST))) { dev_err(queue->ctrl->ctrl.device, "queue %d: data digest flag is cleared\n", nvme_tcp_queue_id(queue)); return -EPROTO; } crypto_ahash_init(queue->rcv_hash); return 0; } static void nvme_tcp_exit_request(struct blk_mq_tag_set *set, struct request *rq, unsigned int hctx_idx) { struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq); page_frag_free(req->pdu); } static int nvme_tcp_init_request(struct blk_mq_tag_set *set, struct request *rq, unsigned int hctx_idx, unsigned int numa_node) { struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(set->driver_data); struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq); struct nvme_tcp_cmd_pdu *pdu; int queue_idx = (set == &ctrl->tag_set) ? 
hctx_idx + 1 : 0; struct nvme_tcp_queue *queue = &ctrl->queues[queue_idx]; u8 hdgst = nvme_tcp_hdgst_len(queue); req->pdu = page_frag_alloc(&queue->pf_cache, sizeof(struct nvme_tcp_cmd_pdu) + hdgst, GFP_KERNEL | __GFP_ZERO); if (!req->pdu) return -ENOMEM; pdu = req->pdu; req->queue = queue; nvme_req(rq)->ctrl = &ctrl->ctrl; nvme_req(rq)->cmd = &pdu->cmd; return 0; } static int nvme_tcp_init_hctx(struct blk_mq_hw_ctx *hctx, void *data, unsigned int hctx_idx) { struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(data); struct nvme_tcp_queue *queue = &ctrl->queues[hctx_idx + 1]; hctx->driver_data = queue; return 0; } static int nvme_tcp_init_admin_hctx(struct blk_mq_hw_ctx *hctx, void *data, unsigned int hctx_idx) { struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(data); struct nvme_tcp_queue *queue = &ctrl->queues[0]; hctx->driver_data = queue; return 0; } static enum nvme_tcp_recv_state nvme_tcp_recv_state(struct nvme_tcp_queue *queue) { return (queue->pdu_remaining) ? NVME_TCP_RECV_PDU : (queue->ddgst_remaining) ? NVME_TCP_RECV_DDGST : NVME_TCP_RECV_DATA; } static void nvme_tcp_init_recv_ctx(struct nvme_tcp_queue *queue) { queue->pdu_remaining = sizeof(struct nvme_tcp_rsp_pdu) + nvme_tcp_hdgst_len(queue); queue->pdu_offset = 0; queue->data_remaining = -1; queue->ddgst_remaining = 0; } static void nvme_tcp_error_recovery(struct nvme_ctrl *ctrl) { if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_RESETTING)) return; dev_warn(ctrl->device, "starting error recovery\n"); queue_work(nvme_reset_wq, &to_tcp_ctrl(ctrl)->err_work); } static int nvme_tcp_process_nvme_cqe(struct nvme_tcp_queue *queue, struct nvme_completion *cqe) { struct nvme_tcp_request *req; struct request *rq; rq = nvme_find_rq(nvme_tcp_tagset(queue), cqe->command_id); if (!rq) { dev_err(queue->ctrl->ctrl.device, "got bad cqe.command_id %#x on queue %d\n", cqe->command_id, nvme_tcp_queue_id(queue)); nvme_tcp_error_recovery(&queue->ctrl->ctrl); return -EINVAL; } req = blk_mq_rq_to_pdu(rq); if (req->status == cpu_to_le16(NVME_SC_SUCCESS)) req->status = cqe->status; if (!nvme_try_complete_req(rq, req->status, cqe->result)) nvme_complete_rq(rq); queue->nr_cqe++; return 0; } static int nvme_tcp_handle_c2h_data(struct nvme_tcp_queue *queue, struct nvme_tcp_data_pdu *pdu) { struct request *rq; rq = nvme_find_rq(nvme_tcp_tagset(queue), pdu->command_id); if (!rq) { dev_err(queue->ctrl->ctrl.device, "got bad c2hdata.command_id %#x on queue %d\n", pdu->command_id, nvme_tcp_queue_id(queue)); return -ENOENT; } if (!blk_rq_payload_bytes(rq)) { dev_err(queue->ctrl->ctrl.device, "queue %d tag %#x unexpected data\n", nvme_tcp_queue_id(queue), rq->tag); return -EIO; } queue->data_remaining = le32_to_cpu(pdu->data_length); if (pdu->hdr.flags & NVME_TCP_F_DATA_SUCCESS && unlikely(!(pdu->hdr.flags & NVME_TCP_F_DATA_LAST))) { dev_err(queue->ctrl->ctrl.device, "queue %d tag %#x SUCCESS set but not last PDU\n", nvme_tcp_queue_id(queue), rq->tag); nvme_tcp_error_recovery(&queue->ctrl->ctrl); return -EPROTO; } return 0; } static int nvme_tcp_handle_comp(struct nvme_tcp_queue *queue, struct nvme_tcp_rsp_pdu *pdu) { struct nvme_completion *cqe = &pdu->cqe; int ret = 0; /* * AEN requests are special as they don't time out and can * survive any kind of queue freeze and often don't respond to * aborts. We don't even bother to allocate a struct request * for them but rather special case them here. 
*/ if (unlikely(nvme_is_aen_req(nvme_tcp_queue_id(queue), cqe->command_id))) nvme_complete_async_event(&queue->ctrl->ctrl, cqe->status, &cqe->result); else ret = nvme_tcp_process_nvme_cqe(queue, cqe); return ret; } static void nvme_tcp_setup_h2c_data_pdu(struct nvme_tcp_request *req) { struct nvme_tcp_data_pdu *data = nvme_tcp_req_data_pdu(req); struct nvme_tcp_queue *queue = req->queue; struct request *rq = blk_mq_rq_from_pdu(req); u32 h2cdata_sent = req->pdu_len; u8 hdgst = nvme_tcp_hdgst_len(queue); u8 ddgst = nvme_tcp_ddgst_len(queue); req->state = NVME_TCP_SEND_H2C_PDU; req->offset = 0; req->pdu_len = min(req->h2cdata_left, queue->maxh2cdata); req->pdu_sent = 0; req->h2cdata_left -= req->pdu_len; req->h2cdata_offset += h2cdata_sent; memset(data, 0, sizeof(*data)); data->hdr.type = nvme_tcp_h2c_data; if (!req->h2cdata_left) data->hdr.flags = NVME_TCP_F_DATA_LAST; if (queue->hdr_digest) data->hdr.flags |= NVME_TCP_F_HDGST; if (queue->data_digest) data->hdr.flags |= NVME_TCP_F_DDGST; data->hdr.hlen = sizeof(*data); data->hdr.pdo = data->hdr.hlen + hdgst; data->hdr.plen = cpu_to_le32(data->hdr.hlen + hdgst + req->pdu_len + ddgst); data->ttag = req->ttag; data->command_id = nvme_cid(rq); data->data_offset = cpu_to_le32(req->h2cdata_offset); data->data_length = cpu_to_le32(req->pdu_len); } static int nvme_tcp_handle_r2t(struct nvme_tcp_queue *queue, struct nvme_tcp_r2t_pdu *pdu) { struct nvme_tcp_request *req; struct request *rq; u32 r2t_length = le32_to_cpu(pdu->r2t_length); u32 r2t_offset = le32_to_cpu(pdu->r2t_offset); rq = nvme_find_rq(nvme_tcp_tagset(queue), pdu->command_id); if (!rq) { dev_err(queue->ctrl->ctrl.device, "got bad r2t.command_id %#x on queue %d\n", pdu->command_id, nvme_tcp_queue_id(queue)); return -ENOENT; } req = blk_mq_rq_to_pdu(rq); if (unlikely(!r2t_length)) { dev_err(queue->ctrl->ctrl.device, "req %d r2t len is %u, probably a bug...\n", rq->tag, r2t_length); return -EPROTO; } if (unlikely(req->data_sent + r2t_length > req->data_len)) { dev_err(queue->ctrl->ctrl.device, "req %d r2t len %u exceeded data len %u (%zu sent)\n", rq->tag, r2t_length, req->data_len, req->data_sent); return -EPROTO; } if (unlikely(r2t_offset < req->data_sent)) { dev_err(queue->ctrl->ctrl.device, "req %d unexpected r2t offset %u (expected %zu)\n", rq->tag, r2t_offset, req->data_sent); return -EPROTO; } req->pdu_len = 0; req->h2cdata_left = r2t_length; req->h2cdata_offset = r2t_offset; req->ttag = pdu->ttag; nvme_tcp_setup_h2c_data_pdu(req); nvme_tcp_queue_request(req, false, true); return 0; } static int nvme_tcp_recv_pdu(struct nvme_tcp_queue *queue, struct sk_buff *skb, unsigned int *offset, size_t *len) { struct nvme_tcp_hdr *hdr; char *pdu = queue->pdu; size_t rcv_len = min_t(size_t, *len, queue->pdu_remaining); int ret; ret = skb_copy_bits(skb, *offset, &pdu[queue->pdu_offset], rcv_len); if (unlikely(ret)) return ret; queue->pdu_remaining -= rcv_len; queue->pdu_offset += rcv_len; *offset += rcv_len; *len -= rcv_len; if (queue->pdu_remaining) return 0; hdr = queue->pdu; if (queue->hdr_digest) { ret = nvme_tcp_verify_hdgst(queue, queue->pdu, hdr->hlen); if (unlikely(ret)) return ret; } if (queue->data_digest) { ret = nvme_tcp_check_ddgst(queue, queue->pdu); if (unlikely(ret)) return ret; } switch (hdr->type) { case nvme_tcp_c2h_data: return nvme_tcp_handle_c2h_data(queue, (void *)queue->pdu); case nvme_tcp_rsp: nvme_tcp_init_recv_ctx(queue); return nvme_tcp_handle_comp(queue, (void *)queue->pdu); case nvme_tcp_r2t: nvme_tcp_init_recv_ctx(queue); return nvme_tcp_handle_r2t(queue, (void 
*)queue->pdu); default: dev_err(queue->ctrl->ctrl.device, "unsupported pdu type (%d)\n", hdr->type); return -EINVAL; } } static inline void nvme_tcp_end_request(struct request *rq, u16 status) { union nvme_result res = {}; if (!nvme_try_complete_req(rq, cpu_to_le16(status << 1), res)) nvme_complete_rq(rq); } static int nvme_tcp_recv_data(struct nvme_tcp_queue *queue, struct sk_buff *skb, unsigned int *offset, size_t *len) { struct nvme_tcp_data_pdu *pdu = (void *)queue->pdu; struct request *rq = nvme_cid_to_rq(nvme_tcp_tagset(queue), pdu->command_id); struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq); while (true) { int recv_len, ret; recv_len = min_t(size_t, *len, queue->data_remaining); if (!recv_len) break; if (!iov_iter_count(&req->iter)) { req->curr_bio = req->curr_bio->bi_next; /* * If we don't have any bios it means that the controller * sent more data than we requested, hence error */ if (!req->curr_bio) { dev_err(queue->ctrl->ctrl.device, "queue %d no space in request %#x", nvme_tcp_queue_id(queue), rq->tag); nvme_tcp_init_recv_ctx(queue); return -EIO; } nvme_tcp_init_iter(req, ITER_DEST); } /* we can read only from what is left in this bio */ recv_len = min_t(size_t, recv_len, iov_iter_count(&req->iter)); if (queue->data_digest) ret = skb_copy_and_hash_datagram_iter(skb, *offset, &req->iter, recv_len, queue->rcv_hash); else ret = skb_copy_datagram_iter(skb, *offset, &req->iter, recv_len); if (ret) { dev_err(queue->ctrl->ctrl.device, "queue %d failed to copy request %#x data", nvme_tcp_queue_id(queue), rq->tag); return ret; } *len -= recv_len; *offset += recv_len; queue->data_remaining -= recv_len; } if (!queue->data_remaining) { if (queue->data_digest) { nvme_tcp_ddgst_final(queue->rcv_hash, &queue->exp_ddgst); queue->ddgst_remaining = NVME_TCP_DIGEST_LENGTH; } else { if (pdu->hdr.flags & NVME_TCP_F_DATA_SUCCESS) { nvme_tcp_end_request(rq, le16_to_cpu(req->status)); queue->nr_cqe++; } nvme_tcp_init_recv_ctx(queue); } } return 0; } static int nvme_tcp_recv_ddgst(struct nvme_tcp_queue *queue, struct sk_buff *skb, unsigned int *offset, size_t *len) { struct nvme_tcp_data_pdu *pdu = (void *)queue->pdu; char *ddgst = (char *)&queue->recv_ddgst; size_t recv_len = min_t(size_t, *len, queue->ddgst_remaining); off_t off = NVME_TCP_DIGEST_LENGTH - queue->ddgst_remaining; int ret; ret = skb_copy_bits(skb, *offset, &ddgst[off], recv_len); if (unlikely(ret)) return ret; queue->ddgst_remaining -= recv_len; *offset += recv_len; *len -= recv_len; if (queue->ddgst_remaining) return 0; if (queue->recv_ddgst != queue->exp_ddgst) { struct request *rq = nvme_cid_to_rq(nvme_tcp_tagset(queue), pdu->command_id); struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq); req->status = cpu_to_le16(NVME_SC_DATA_XFER_ERROR); dev_err(queue->ctrl->ctrl.device, "data digest error: recv %#x expected %#x\n", le32_to_cpu(queue->recv_ddgst), le32_to_cpu(queue->exp_ddgst)); } if (pdu->hdr.flags & NVME_TCP_F_DATA_SUCCESS) { struct request *rq = nvme_cid_to_rq(nvme_tcp_tagset(queue), pdu->command_id); struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq); nvme_tcp_end_request(rq, le16_to_cpu(req->status)); queue->nr_cqe++; } nvme_tcp_init_recv_ctx(queue); return 0; } static int nvme_tcp_recv_skb(read_descriptor_t *desc, struct sk_buff *skb, unsigned int offset, size_t len) { struct nvme_tcp_queue *queue = desc->arg.data; size_t consumed = len; int result; if (unlikely(!queue->rd_enabled)) return -EFAULT; while (len) { switch (nvme_tcp_recv_state(queue)) { case NVME_TCP_RECV_PDU: result = nvme_tcp_recv_pdu(queue, skb, &offset,
&len); break; case NVME_TCP_RECV_DATA: result = nvme_tcp_recv_data(queue, skb, &offset, &len); break; case NVME_TCP_RECV_DDGST: result = nvme_tcp_recv_ddgst(queue, skb, &offset, &len); break; default: result = -EFAULT; } if (result) { dev_err(queue->ctrl->ctrl.device, "receive failed: %d\n", result); queue->rd_enabled = false; nvme_tcp_error_recovery(&queue->ctrl->ctrl); return result; } } return consumed; } static void nvme_tcp_data_ready(struct sock *sk) { struct nvme_tcp_queue *queue; trace_sk_data_ready(sk); read_lock_bh(&sk->sk_callback_lock); queue = sk->sk_user_data; if (likely(queue && queue->rd_enabled) && !test_bit(NVME_TCP_Q_POLLING, &queue->flags)) queue_work_on(queue->io_cpu, nvme_tcp_wq, &queue->io_work); read_unlock_bh(&sk->sk_callback_lock); } static void nvme_tcp_write_space(struct sock *sk) { struct nvme_tcp_queue *queue; read_lock_bh(&sk->sk_callback_lock); queue = sk->sk_user_data; if (likely(queue && sk_stream_is_writeable(sk))) { clear_bit(SOCK_NOSPACE, &sk->sk_socket->flags); queue_work_on(queue->io_cpu, nvme_tcp_wq, &queue->io_work); } read_unlock_bh(&sk->sk_callback_lock); } static void nvme_tcp_state_change(struct sock *sk) { struct nvme_tcp_queue *queue; read_lock_bh(&sk->sk_callback_lock); queue = sk->sk_user_data; if (!queue) goto done; switch (sk->sk_state) { case TCP_CLOSE: case TCP_CLOSE_WAIT: case TCP_LAST_ACK: case TCP_FIN_WAIT1: case TCP_FIN_WAIT2: nvme_tcp_error_recovery(&queue->ctrl->ctrl); break; default: dev_info(queue->ctrl->ctrl.device, "queue %d socket state %d\n", nvme_tcp_queue_id(queue), sk->sk_state); } queue->state_change(sk); done: read_unlock_bh(&sk->sk_callback_lock); } static inline void nvme_tcp_done_send_req(struct nvme_tcp_queue *queue) { queue->request = NULL; } static void nvme_tcp_fail_request(struct nvme_tcp_request *req) { if (nvme_tcp_async_req(req)) { union nvme_result res = {}; nvme_complete_async_event(&req->queue->ctrl->ctrl, cpu_to_le16(NVME_SC_HOST_PATH_ERROR), &res); } else { nvme_tcp_end_request(blk_mq_rq_from_pdu(req), NVME_SC_HOST_PATH_ERROR); } } static int nvme_tcp_try_send_data(struct nvme_tcp_request *req) { struct nvme_tcp_queue *queue = req->queue; int req_data_len = req->data_len; u32 h2cdata_left = req->h2cdata_left; while (true) { struct bio_vec bvec; struct msghdr msg = { .msg_flags = MSG_DONTWAIT | MSG_SPLICE_PAGES, }; struct page *page = nvme_tcp_req_cur_page(req); size_t offset = nvme_tcp_req_cur_offset(req); size_t len = nvme_tcp_req_cur_length(req); bool last = nvme_tcp_pdu_last_send(req, len); int req_data_sent = req->data_sent; int ret; if (last && !queue->data_digest && !nvme_tcp_queue_more(queue)) msg.msg_flags |= MSG_EOR; else msg.msg_flags |= MSG_MORE; if (!sendpage_ok(page)) msg.msg_flags &= ~MSG_SPLICE_PAGES; bvec_set_page(&bvec, page, len, offset); iov_iter_bvec(&msg.msg_iter, ITER_SOURCE, &bvec, 1, len); ret = sock_sendmsg(queue->sock, &msg); if (ret <= 0) return ret; if (queue->data_digest) nvme_tcp_ddgst_update(queue->snd_hash, page, offset, ret); /* * update the request iterator except for the last payload send * in the request where we don't want to modify it as we may * compete with the RX path completing the request. 
*/ if (req_data_sent + ret < req_data_len) nvme_tcp_advance_req(req, ret); /* fully successful last send in current PDU */ if (last && ret == len) { if (queue->data_digest) { nvme_tcp_ddgst_final(queue->snd_hash, &req->ddgst); req->state = NVME_TCP_SEND_DDGST; req->offset = 0; } else { if (h2cdata_left) nvme_tcp_setup_h2c_data_pdu(req); else nvme_tcp_done_send_req(queue); } return 1; } } return -EAGAIN; } static int nvme_tcp_try_send_cmd_pdu(struct nvme_tcp_request *req) { struct nvme_tcp_queue *queue = req->queue; struct nvme_tcp_cmd_pdu *pdu = nvme_tcp_req_cmd_pdu(req); struct bio_vec bvec; struct msghdr msg = { .msg_flags = MSG_DONTWAIT | MSG_SPLICE_PAGES, }; bool inline_data = nvme_tcp_has_inline_data(req); u8 hdgst = nvme_tcp_hdgst_len(queue); int len = sizeof(*pdu) + hdgst - req->offset; int ret; if (inline_data || nvme_tcp_queue_more(queue)) msg.msg_flags |= MSG_MORE; else msg.msg_flags |= MSG_EOR; if (queue->hdr_digest && !req->offset) nvme_tcp_hdgst(queue->snd_hash, pdu, sizeof(*pdu)); bvec_set_virt(&bvec, (void *)pdu + req->offset, len); iov_iter_bvec(&msg.msg_iter, ITER_SOURCE, &bvec, 1, len); ret = sock_sendmsg(queue->sock, &msg); if (unlikely(ret <= 0)) return ret; len -= ret; if (!len) { if (inline_data) { req->state = NVME_TCP_SEND_DATA; if (queue->data_digest) crypto_ahash_init(queue->snd_hash); } else { nvme_tcp_done_send_req(queue); } return 1; } req->offset += ret; return -EAGAIN; } static int nvme_tcp_try_send_data_pdu(struct nvme_tcp_request *req) { struct nvme_tcp_queue *queue = req->queue; struct nvme_tcp_data_pdu *pdu = nvme_tcp_req_data_pdu(req); struct bio_vec bvec; struct msghdr msg = { .msg_flags = MSG_DONTWAIT | MSG_MORE, }; u8 hdgst = nvme_tcp_hdgst_len(queue); int len = sizeof(*pdu) - req->offset + hdgst; int ret; if (queue->hdr_digest && !req->offset) nvme_tcp_hdgst(queue->snd_hash, pdu, sizeof(*pdu)); if (!req->h2cdata_left) msg.msg_flags |= MSG_SPLICE_PAGES; bvec_set_virt(&bvec, (void *)pdu + req->offset, len); iov_iter_bvec(&msg.msg_iter, ITER_SOURCE, &bvec, 1, len); ret = sock_sendmsg(queue->sock, &msg); if (unlikely(ret <= 0)) return ret; len -= ret; if (!len) { req->state = NVME_TCP_SEND_DATA; if (queue->data_digest) crypto_ahash_init(queue->snd_hash); return 1; } req->offset += ret; return -EAGAIN; } static int nvme_tcp_try_send_ddgst(struct nvme_tcp_request *req) { struct nvme_tcp_queue *queue = req->queue; size_t offset = req->offset; u32 h2cdata_left = req->h2cdata_left; int ret; struct msghdr msg = { .msg_flags = MSG_DONTWAIT }; struct kvec iov = { .iov_base = (u8 *)&req->ddgst + req->offset, .iov_len = NVME_TCP_DIGEST_LENGTH - req->offset }; if (nvme_tcp_queue_more(queue)) msg.msg_flags |= MSG_MORE; else msg.msg_flags |= MSG_EOR; ret = kernel_sendmsg(queue->sock, &msg, &iov, 1, iov.iov_len); if (unlikely(ret <= 0)) return ret; if (offset + ret == NVME_TCP_DIGEST_LENGTH) { if (h2cdata_left) nvme_tcp_setup_h2c_data_pdu(req); else nvme_tcp_done_send_req(queue); return 1; } req->offset += ret; return -EAGAIN; } static int nvme_tcp_try_send(struct nvme_tcp_queue *queue) { struct nvme_tcp_request *req; unsigned int noreclaim_flag; int ret = 1; if (!queue->request) { queue->request = nvme_tcp_fetch_request(queue); if (!queue->request) return 0; } req = queue->request; noreclaim_flag = memalloc_noreclaim_save(); if (req->state == NVME_TCP_SEND_CMD_PDU) { ret = nvme_tcp_try_send_cmd_pdu(req); if (ret <= 0) goto done; if (!nvme_tcp_has_inline_data(req)) goto out; } if (req->state == NVME_TCP_SEND_H2C_PDU) { ret = nvme_tcp_try_send_data_pdu(req); if (ret <= 
0) goto done; } if (req->state == NVME_TCP_SEND_DATA) { ret = nvme_tcp_try_send_data(req); if (ret <= 0) goto done; } if (req->state == NVME_TCP_SEND_DDGST) ret = nvme_tcp_try_send_ddgst(req); done: if (ret == -EAGAIN) { ret = 0; } else if (ret < 0) { dev_err(queue->ctrl->ctrl.device, "failed to send request %d\n", ret); nvme_tcp_fail_request(queue->request); nvme_tcp_done_send_req(queue); } out: memalloc_noreclaim_restore(noreclaim_flag); return ret; } static int nvme_tcp_try_recv(struct nvme_tcp_queue *queue) { struct socket *sock = queue->sock; struct sock *sk = sock->sk; read_descriptor_t rd_desc; int consumed; rd_desc.arg.data = queue; rd_desc.count = 1; lock_sock(sk); queue->nr_cqe = 0; consumed = sock->ops->read_sock(sk, &rd_desc, nvme_tcp_recv_skb); release_sock(sk); return consumed; } static void nvme_tcp_io_work(struct work_struct *w) { struct nvme_tcp_queue *queue = container_of(w, struct nvme_tcp_queue, io_work); unsigned long deadline = jiffies + msecs_to_jiffies(1); do { bool pending = false; int result; if (mutex_trylock(&queue->send_mutex)) { result = nvme_tcp_try_send(queue); mutex_unlock(&queue->send_mutex); if (result > 0) pending = true; else if (unlikely(result < 0)) break; } result = nvme_tcp_try_recv(queue); if (result > 0) pending = true; else if (unlikely(result < 0)) return; if (!pending || !queue->rd_enabled) return; } while (!time_after(jiffies, deadline)); /* quota is exhausted */ queue_work_on(queue->io_cpu, nvme_tcp_wq, &queue->io_work); } static void nvme_tcp_free_crypto(struct nvme_tcp_queue *queue) { struct crypto_ahash *tfm = crypto_ahash_reqtfm(queue->rcv_hash); ahash_request_free(queue->rcv_hash); ahash_request_free(queue->snd_hash); crypto_free_ahash(tfm); } static int nvme_tcp_alloc_crypto(struct nvme_tcp_queue *queue) { struct crypto_ahash *tfm; tfm = crypto_alloc_ahash("crc32c", 0, CRYPTO_ALG_ASYNC); if (IS_ERR(tfm)) return PTR_ERR(tfm); queue->snd_hash = ahash_request_alloc(tfm, GFP_KERNEL); if (!queue->snd_hash) goto free_tfm; ahash_request_set_callback(queue->snd_hash, 0, NULL, NULL); queue->rcv_hash = ahash_request_alloc(tfm, GFP_KERNEL); if (!queue->rcv_hash) goto free_snd_hash; ahash_request_set_callback(queue->rcv_hash, 0, NULL, NULL); return 0; free_snd_hash: ahash_request_free(queue->snd_hash); free_tfm: crypto_free_ahash(tfm); return -ENOMEM; } static void nvme_tcp_free_async_req(struct nvme_tcp_ctrl *ctrl) { struct nvme_tcp_request *async = &ctrl->async_req; page_frag_free(async->pdu); } static int nvme_tcp_alloc_async_req(struct nvme_tcp_ctrl *ctrl) { struct nvme_tcp_queue *queue = &ctrl->queues[0]; struct nvme_tcp_request *async = &ctrl->async_req; u8 hdgst = nvme_tcp_hdgst_len(queue); async->pdu = page_frag_alloc(&queue->pf_cache, sizeof(struct nvme_tcp_cmd_pdu) + hdgst, GFP_KERNEL | __GFP_ZERO); if (!async->pdu) return -ENOMEM; async->queue = &ctrl->queues[0]; return 0; } static void nvme_tcp_free_queue(struct nvme_ctrl *nctrl, int qid) { struct page *page; struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl); struct nvme_tcp_queue *queue = &ctrl->queues[qid]; unsigned int noreclaim_flag; if (!test_and_clear_bit(NVME_TCP_Q_ALLOCATED, &queue->flags)) return; if (queue->hdr_digest || queue->data_digest) nvme_tcp_free_crypto(queue); if (queue->pf_cache.va) { page = virt_to_head_page(queue->pf_cache.va); __page_frag_cache_drain(page, queue->pf_cache.pagecnt_bias); queue->pf_cache.va = NULL; } noreclaim_flag = memalloc_noreclaim_save(); sock_release(queue->sock); memalloc_noreclaim_restore(noreclaim_flag); kfree(queue->pdu); 
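/* the socket and receive PDU buffer are gone; finally destroy the per-queue locks */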
mutex_destroy(&queue->send_mutex); mutex_destroy(&queue->queue_lock); } static int nvme_tcp_init_connection(struct nvme_tcp_queue *queue) { struct nvme_tcp_icreq_pdu *icreq; struct nvme_tcp_icresp_pdu *icresp; struct msghdr msg = {}; struct kvec iov; bool ctrl_hdgst, ctrl_ddgst; u32 maxh2cdata; int ret; icreq = kzalloc(sizeof(*icreq), GFP_KERNEL); if (!icreq) return -ENOMEM; icresp = kzalloc(sizeof(*icresp), GFP_KERNEL); if (!icresp) { ret = -ENOMEM; goto free_icreq; } icreq->hdr.type = nvme_tcp_icreq; icreq->hdr.hlen = sizeof(*icreq); icreq->hdr.pdo = 0; icreq->hdr.plen = cpu_to_le32(icreq->hdr.hlen); icreq->pfv = cpu_to_le16(NVME_TCP_PFV_1_0); icreq->maxr2t = 0; /* single inflight r2t supported */ icreq->hpda = 0; /* no alignment constraint */ if (queue->hdr_digest) icreq->digest |= NVME_TCP_HDR_DIGEST_ENABLE; if (queue->data_digest) icreq->digest |= NVME_TCP_DATA_DIGEST_ENABLE; iov.iov_base = icreq; iov.iov_len = sizeof(*icreq); ret = kernel_sendmsg(queue->sock, &msg, &iov, 1, iov.iov_len); if (ret < 0) goto free_icresp; memset(&msg, 0, sizeof(msg)); iov.iov_base = icresp; iov.iov_len = sizeof(*icresp); ret = kernel_recvmsg(queue->sock, &msg, &iov, 1, iov.iov_len, msg.msg_flags); if (ret < 0) goto free_icresp; ret = -EINVAL; if (icresp->hdr.type != nvme_tcp_icresp) { pr_err("queue %d: bad type returned %d\n", nvme_tcp_queue_id(queue), icresp->hdr.type); goto free_icresp; } if (le32_to_cpu(icresp->hdr.plen) != sizeof(*icresp)) { pr_err("queue %d: bad pdu length returned %d\n", nvme_tcp_queue_id(queue), icresp->hdr.plen); goto free_icresp; } if (icresp->pfv != NVME_TCP_PFV_1_0) { pr_err("queue %d: bad pfv returned %d\n", nvme_tcp_queue_id(queue), icresp->pfv); goto free_icresp; } ctrl_ddgst = !!(icresp->digest & NVME_TCP_DATA_DIGEST_ENABLE); if ((queue->data_digest && !ctrl_ddgst) || (!queue->data_digest && ctrl_ddgst)) { pr_err("queue %d: data digest mismatch host: %s ctrl: %s\n", nvme_tcp_queue_id(queue), queue->data_digest ? "enabled" : "disabled", ctrl_ddgst ? "enabled" : "disabled"); goto free_icresp; } ctrl_hdgst = !!(icresp->digest & NVME_TCP_HDR_DIGEST_ENABLE); if ((queue->hdr_digest && !ctrl_hdgst) || (!queue->hdr_digest && ctrl_hdgst)) { pr_err("queue %d: header digest mismatch host: %s ctrl: %s\n", nvme_tcp_queue_id(queue), queue->hdr_digest ? "enabled" : "disabled", ctrl_hdgst ? 
"enabled" : "disabled"); goto free_icresp; } if (icresp->cpda != 0) { pr_err("queue %d: unsupported cpda returned %d\n", nvme_tcp_queue_id(queue), icresp->cpda); goto free_icresp; } maxh2cdata = le32_to_cpu(icresp->maxdata); if ((maxh2cdata % 4) || (maxh2cdata < NVME_TCP_MIN_MAXH2CDATA)) { pr_err("queue %d: invalid maxh2cdata returned %u\n", nvme_tcp_queue_id(queue), maxh2cdata); goto free_icresp; } queue->maxh2cdata = maxh2cdata; ret = 0; free_icresp: kfree(icresp); free_icreq: kfree(icreq); return ret; } static bool nvme_tcp_admin_queue(struct nvme_tcp_queue *queue) { return nvme_tcp_queue_id(queue) == 0; } static bool nvme_tcp_default_queue(struct nvme_tcp_queue *queue) { struct nvme_tcp_ctrl *ctrl = queue->ctrl; int qid = nvme_tcp_queue_id(queue); return !nvme_tcp_admin_queue(queue) && qid < 1 + ctrl->io_queues[HCTX_TYPE_DEFAULT]; } static bool nvme_tcp_read_queue(struct nvme_tcp_queue *queue) { struct nvme_tcp_ctrl *ctrl = queue->ctrl; int qid = nvme_tcp_queue_id(queue); return !nvme_tcp_admin_queue(queue) && !nvme_tcp_default_queue(queue) && qid < 1 + ctrl->io_queues[HCTX_TYPE_DEFAULT] + ctrl->io_queues[HCTX_TYPE_READ]; } static bool nvme_tcp_poll_queue(struct nvme_tcp_queue *queue) { struct nvme_tcp_ctrl *ctrl = queue->ctrl; int qid = nvme_tcp_queue_id(queue); return !nvme_tcp_admin_queue(queue) && !nvme_tcp_default_queue(queue) && !nvme_tcp_read_queue(queue) && qid < 1 + ctrl->io_queues[HCTX_TYPE_DEFAULT] + ctrl->io_queues[HCTX_TYPE_READ] + ctrl->io_queues[HCTX_TYPE_POLL]; } static void nvme_tcp_set_queue_io_cpu(struct nvme_tcp_queue *queue) { struct nvme_tcp_ctrl *ctrl = queue->ctrl; int qid = nvme_tcp_queue_id(queue); int n = 0; if (nvme_tcp_default_queue(queue)) n = qid - 1; else if (nvme_tcp_read_queue(queue)) n = qid - ctrl->io_queues[HCTX_TYPE_DEFAULT] - 1; else if (nvme_tcp_poll_queue(queue)) n = qid - ctrl->io_queues[HCTX_TYPE_DEFAULT] - ctrl->io_queues[HCTX_TYPE_READ] - 1; queue->io_cpu = cpumask_next_wrap(n - 1, cpu_online_mask, -1, false); } static int nvme_tcp_alloc_queue(struct nvme_ctrl *nctrl, int qid) { struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl); struct nvme_tcp_queue *queue = &ctrl->queues[qid]; int ret, rcv_pdu_size; mutex_init(&queue->queue_lock); queue->ctrl = ctrl; init_llist_head(&queue->req_list); INIT_LIST_HEAD(&queue->send_list); mutex_init(&queue->send_mutex); INIT_WORK(&queue->io_work, nvme_tcp_io_work); if (qid > 0) queue->cmnd_capsule_len = nctrl->ioccsz * 16; else queue->cmnd_capsule_len = sizeof(struct nvme_command) + NVME_TCP_ADMIN_CCSZ; ret = sock_create(ctrl->addr.ss_family, SOCK_STREAM, IPPROTO_TCP, &queue->sock); if (ret) { dev_err(nctrl->device, "failed to create socket: %d\n", ret); goto err_destroy_mutex; } nvme_tcp_reclassify_socket(queue->sock); /* Single syn retry */ tcp_sock_set_syncnt(queue->sock->sk, 1); /* Set TCP no delay */ tcp_sock_set_nodelay(queue->sock->sk); /* * Cleanup whatever is sitting in the TCP transmit queue on socket * close. This is done to prevent stale data from being sent should * the network connection be restored before TCP times out. 
*/ sock_no_linger(queue->sock->sk); if (so_priority > 0) sock_set_priority(queue->sock->sk, so_priority); /* Set socket type of service */ if (nctrl->opts->tos >= 0) ip_sock_set_tos(queue->sock->sk, nctrl->opts->tos); /* Set 10 seconds timeout for icresp recvmsg */ queue->sock->sk->sk_rcvtimeo = 10 * HZ; queue->sock->sk->sk_allocation = GFP_ATOMIC; queue->sock->sk->sk_use_task_frag = false; nvme_tcp_set_queue_io_cpu(queue); queue->request = NULL; queue->data_remaining = 0; queue->ddgst_remaining = 0; queue->pdu_remaining = 0; queue->pdu_offset = 0; sk_set_memalloc(queue->sock->sk); if (nctrl->opts->mask & NVMF_OPT_HOST_TRADDR) { ret = kernel_bind(queue->sock, (struct sockaddr *)&ctrl->src_addr, sizeof(ctrl->src_addr)); if (ret) { dev_err(nctrl->device, "failed to bind queue %d socket %d\n", qid, ret); goto err_sock; } } if (nctrl->opts->mask & NVMF_OPT_HOST_IFACE) { char *iface = nctrl->opts->host_iface; sockptr_t optval = KERNEL_SOCKPTR(iface); ret = sock_setsockopt(queue->sock, SOL_SOCKET, SO_BINDTODEVICE, optval, strlen(iface)); if (ret) { dev_err(nctrl->device, "failed to bind to interface %s queue %d err %d\n", iface, qid, ret); goto err_sock; } } queue->hdr_digest = nctrl->opts->hdr_digest; queue->data_digest = nctrl->opts->data_digest; if (queue->hdr_digest || queue->data_digest) { ret = nvme_tcp_alloc_crypto(queue); if (ret) { dev_err(nctrl->device, "failed to allocate queue %d crypto\n", qid); goto err_sock; } } rcv_pdu_size = sizeof(struct nvme_tcp_rsp_pdu) + nvme_tcp_hdgst_len(queue); queue->pdu = kmalloc(rcv_pdu_size, GFP_KERNEL); if (!queue->pdu) { ret = -ENOMEM; goto err_crypto; } dev_dbg(nctrl->device, "connecting queue %d\n", nvme_tcp_queue_id(queue)); ret = kernel_connect(queue->sock, (struct sockaddr *)&ctrl->addr, sizeof(ctrl->addr), 0); if (ret) { dev_err(nctrl->device, "failed to connect socket: %d\n", ret); goto err_rcv_pdu; } ret = nvme_tcp_init_connection(queue); if (ret) goto err_init_connect; set_bit(NVME_TCP_Q_ALLOCATED, &queue->flags); return 0; err_init_connect: kernel_sock_shutdown(queue->sock, SHUT_RDWR); err_rcv_pdu: kfree(queue->pdu); err_crypto: if (queue->hdr_digest || queue->data_digest) nvme_tcp_free_crypto(queue); err_sock: sock_release(queue->sock); queue->sock = NULL; err_destroy_mutex: mutex_destroy(&queue->send_mutex); mutex_destroy(&queue->queue_lock); return ret; } static void nvme_tcp_restore_sock_ops(struct nvme_tcp_queue *queue) { struct socket *sock = queue->sock; write_lock_bh(&sock->sk->sk_callback_lock); sock->sk->sk_user_data = NULL; sock->sk->sk_data_ready = queue->data_ready; sock->sk->sk_state_change = queue->state_change; sock->sk->sk_write_space = queue->write_space; write_unlock_bh(&sock->sk->sk_callback_lock); } static void __nvme_tcp_stop_queue(struct nvme_tcp_queue *queue) { kernel_sock_shutdown(queue->sock, SHUT_RDWR); nvme_tcp_restore_sock_ops(queue); cancel_work_sync(&queue->io_work); } static void nvme_tcp_stop_queue(struct nvme_ctrl *nctrl, int qid) { struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl); struct nvme_tcp_queue *queue = &ctrl->queues[qid]; if (!test_bit(NVME_TCP_Q_ALLOCATED, &queue->flags)) return; mutex_lock(&queue->queue_lock); if (test_and_clear_bit(NVME_TCP_Q_LIVE, &queue->flags)) __nvme_tcp_stop_queue(queue); mutex_unlock(&queue->queue_lock); } static void nvme_tcp_setup_sock_ops(struct nvme_tcp_queue *queue) { write_lock_bh(&queue->sock->sk->sk_callback_lock); queue->sock->sk->sk_user_data = queue; queue->state_change = queue->sock->sk->sk_state_change; queue->data_ready = queue->sock->sk->sk_data_ready; 
queue->write_space = queue->sock->sk->sk_write_space; queue->sock->sk->sk_data_ready = nvme_tcp_data_ready; queue->sock->sk->sk_state_change = nvme_tcp_state_change; queue->sock->sk->sk_write_space = nvme_tcp_write_space; #ifdef CONFIG_NET_RX_BUSY_POLL queue->sock->sk->sk_ll_usec = 1; #endif write_unlock_bh(&queue->sock->sk->sk_callback_lock); } static int nvme_tcp_start_queue(struct nvme_ctrl *nctrl, int idx) { struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl); struct nvme_tcp_queue *queue = &ctrl->queues[idx]; int ret; queue->rd_enabled = true; nvme_tcp_init_recv_ctx(queue); nvme_tcp_setup_sock_ops(queue); if (idx) ret = nvmf_connect_io_queue(nctrl, idx); else ret = nvmf_connect_admin_queue(nctrl); if (!ret) { set_bit(NVME_TCP_Q_LIVE, &queue->flags); } else { if (test_bit(NVME_TCP_Q_ALLOCATED, &queue->flags)) __nvme_tcp_stop_queue(queue); dev_err(nctrl->device, "failed to connect queue: %d ret=%d\n", idx, ret); } return ret; } static void nvme_tcp_free_admin_queue(struct nvme_ctrl *ctrl) { if (to_tcp_ctrl(ctrl)->async_req.pdu) { cancel_work_sync(&ctrl->async_event_work); nvme_tcp_free_async_req(to_tcp_ctrl(ctrl)); to_tcp_ctrl(ctrl)->async_req.pdu = NULL; } nvme_tcp_free_queue(ctrl, 0); } static void nvme_tcp_free_io_queues(struct nvme_ctrl *ctrl) { int i; for (i = 1; i < ctrl->queue_count; i++) nvme_tcp_free_queue(ctrl, i); } static void nvme_tcp_stop_io_queues(struct nvme_ctrl *ctrl) { int i; for (i = 1; i < ctrl->queue_count; i++) nvme_tcp_stop_queue(ctrl, i); } static int nvme_tcp_start_io_queues(struct nvme_ctrl *ctrl, int first, int last) { int i, ret; for (i = first; i < last; i++) { ret = nvme_tcp_start_queue(ctrl, i); if (ret) goto out_stop_queues; } return 0; out_stop_queues: for (i--; i >= first; i--) nvme_tcp_stop_queue(ctrl, i); return ret; } static int nvme_tcp_alloc_admin_queue(struct nvme_ctrl *ctrl) { int ret; ret = nvme_tcp_alloc_queue(ctrl, 0); if (ret) return ret; ret = nvme_tcp_alloc_async_req(to_tcp_ctrl(ctrl)); if (ret) goto out_free_queue; return 0; out_free_queue: nvme_tcp_free_queue(ctrl, 0); return ret; } static int __nvme_tcp_alloc_io_queues(struct nvme_ctrl *ctrl) { int i, ret; for (i = 1; i < ctrl->queue_count; i++) { ret = nvme_tcp_alloc_queue(ctrl, i); if (ret) goto out_free_queues; } return 0; out_free_queues: for (i--; i >= 1; i--) nvme_tcp_free_queue(ctrl, i); return ret; } static int nvme_tcp_alloc_io_queues(struct nvme_ctrl *ctrl) { unsigned int nr_io_queues; int ret; nr_io_queues = nvmf_nr_io_queues(ctrl->opts); ret = nvme_set_queue_count(ctrl, &nr_io_queues); if (ret) return ret; if (nr_io_queues == 0) { dev_err(ctrl->device, "unable to set any I/O queues\n"); return -ENOMEM; } ctrl->queue_count = nr_io_queues + 1; dev_info(ctrl->device, "creating %d I/O queues.\n", nr_io_queues); nvmf_set_io_queues(ctrl->opts, nr_io_queues, to_tcp_ctrl(ctrl)->io_queues); return __nvme_tcp_alloc_io_queues(ctrl); } static void nvme_tcp_destroy_io_queues(struct nvme_ctrl *ctrl, bool remove) { nvme_tcp_stop_io_queues(ctrl); if (remove) nvme_remove_io_tag_set(ctrl); nvme_tcp_free_io_queues(ctrl); } static int nvme_tcp_configure_io_queues(struct nvme_ctrl *ctrl, bool new) { int ret, nr_queues; ret = nvme_tcp_alloc_io_queues(ctrl); if (ret) return ret; if (new) { ret = nvme_alloc_io_tag_set(ctrl, &to_tcp_ctrl(ctrl)->tag_set, &nvme_tcp_mq_ops, ctrl->opts->nr_poll_queues ? HCTX_MAX_TYPES : 2, sizeof(struct nvme_tcp_request)); if (ret) goto out_free_io_queues; } /* * Only start IO queues for which we have allocated the tagset * and limitted it to the available queues. 
On reconnects, the * queue number might have changed. */ nr_queues = min(ctrl->tagset->nr_hw_queues + 1, ctrl->queue_count); ret = nvme_tcp_start_io_queues(ctrl, 1, nr_queues); if (ret) goto out_cleanup_connect_q; if (!new) { nvme_start_freeze(ctrl); nvme_unquiesce_io_queues(ctrl); if (!nvme_wait_freeze_timeout(ctrl, NVME_IO_TIMEOUT)) { /* * If we timed out waiting for freeze we are likely to * be stuck. Fail the controller initialization just * to be safe. */ ret = -ENODEV; nvme_unfreeze(ctrl); goto out_wait_freeze_timed_out; } blk_mq_update_nr_hw_queues(ctrl->tagset, ctrl->queue_count - 1); nvme_unfreeze(ctrl); } /* * If the number of queues has increased (reconnect case) * start all new queues now. */ ret = nvme_tcp_start_io_queues(ctrl, nr_queues, ctrl->tagset->nr_hw_queues + 1); if (ret) goto out_wait_freeze_timed_out; return 0; out_wait_freeze_timed_out: nvme_quiesce_io_queues(ctrl); nvme_sync_io_queues(ctrl); nvme_tcp_stop_io_queues(ctrl); out_cleanup_connect_q: nvme_cancel_tagset(ctrl); if (new) nvme_remove_io_tag_set(ctrl); out_free_io_queues: nvme_tcp_free_io_queues(ctrl); return ret; } static void nvme_tcp_destroy_admin_queue(struct nvme_ctrl *ctrl, bool remove) { nvme_tcp_stop_queue(ctrl, 0); if (remove) nvme_remove_admin_tag_set(ctrl); nvme_tcp_free_admin_queue(ctrl); } static int nvme_tcp_configure_admin_queue(struct nvme_ctrl *ctrl, bool new) { int error; error = nvme_tcp_alloc_admin_queue(ctrl); if (error) return error; if (new) { error = nvme_alloc_admin_tag_set(ctrl, &to_tcp_ctrl(ctrl)->admin_tag_set, &nvme_tcp_admin_mq_ops, sizeof(struct nvme_tcp_request)); if (error) goto out_free_queue; } error = nvme_tcp_start_queue(ctrl, 0); if (error) goto out_cleanup_tagset; error = nvme_enable_ctrl(ctrl); if (error) goto out_stop_queue; nvme_unquiesce_admin_queue(ctrl); error = nvme_init_ctrl_finish(ctrl, false); if (error) goto out_quiesce_queue; return 0; out_quiesce_queue: nvme_quiesce_admin_queue(ctrl); blk_sync_queue(ctrl->admin_q); out_stop_queue: nvme_tcp_stop_queue(ctrl, 0); nvme_cancel_admin_tagset(ctrl); out_cleanup_tagset: if (new) nvme_remove_admin_tag_set(ctrl); out_free_queue: nvme_tcp_free_admin_queue(ctrl); return error; } static void nvme_tcp_teardown_admin_queue(struct nvme_ctrl *ctrl, bool remove) { nvme_quiesce_admin_queue(ctrl); blk_sync_queue(ctrl->admin_q); nvme_tcp_stop_queue(ctrl, 0); nvme_cancel_admin_tagset(ctrl); if (remove) nvme_unquiesce_admin_queue(ctrl); nvme_tcp_destroy_admin_queue(ctrl, remove); } static void nvme_tcp_teardown_io_queues(struct nvme_ctrl *ctrl, bool remove) { if (ctrl->queue_count <= 1) return; nvme_quiesce_admin_queue(ctrl); nvme_quiesce_io_queues(ctrl); nvme_sync_io_queues(ctrl); nvme_tcp_stop_io_queues(ctrl); nvme_cancel_tagset(ctrl); if (remove) nvme_unquiesce_io_queues(ctrl); nvme_tcp_destroy_io_queues(ctrl, remove); } static void nvme_tcp_reconnect_or_remove(struct nvme_ctrl *ctrl) { /* If we are resetting/deleting then do nothing */ if (ctrl->state != NVME_CTRL_CONNECTING) { WARN_ON_ONCE(ctrl->state == NVME_CTRL_NEW || ctrl->state == NVME_CTRL_LIVE); return; } if (nvmf_should_reconnect(ctrl)) { dev_info(ctrl->device, "Reconnecting in %d seconds...\n", ctrl->opts->reconnect_delay); queue_delayed_work(nvme_wq, &to_tcp_ctrl(ctrl)->connect_work, ctrl->opts->reconnect_delay * HZ); } else { dev_info(ctrl->device, "Removing controller...\n"); nvme_delete_ctrl(ctrl); } } static int nvme_tcp_setup_ctrl(struct nvme_ctrl *ctrl, bool new) { struct nvmf_ctrl_options *opts = ctrl->opts; int ret; ret = nvme_tcp_configure_admin_queue(ctrl, 
new); if (ret) return ret; if (ctrl->icdoff) { ret = -EOPNOTSUPP; dev_err(ctrl->device, "icdoff is not supported!\n"); goto destroy_admin; } if (!nvme_ctrl_sgl_supported(ctrl)) { ret = -EOPNOTSUPP; dev_err(ctrl->device, "Mandatory sgls are not supported!\n"); goto destroy_admin; } if (opts->queue_size > ctrl->sqsize + 1) dev_warn(ctrl->device, "queue_size %zu > ctrl sqsize %u, clamping down\n", opts->queue_size, ctrl->sqsize + 1); if (ctrl->sqsize + 1 > ctrl->maxcmd) { dev_warn(ctrl->device, "sqsize %u > ctrl maxcmd %u, clamping down\n", ctrl->sqsize + 1, ctrl->maxcmd); ctrl->sqsize = ctrl->maxcmd - 1; } if (ctrl->queue_count > 1) { ret = nvme_tcp_configure_io_queues(ctrl, new); if (ret) goto destroy_admin; } if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_LIVE)) { /* * state change failure is ok if we started ctrl delete, * unless we're during creation of a new controller to * avoid races with teardown flow. */ WARN_ON_ONCE(ctrl->state != NVME_CTRL_DELETING && ctrl->state != NVME_CTRL_DELETING_NOIO); WARN_ON_ONCE(new); ret = -EINVAL; goto destroy_io; } nvme_start_ctrl(ctrl); return 0; destroy_io: if (ctrl->queue_count > 1) { nvme_quiesce_io_queues(ctrl); nvme_sync_io_queues(ctrl); nvme_tcp_stop_io_queues(ctrl); nvme_cancel_tagset(ctrl); nvme_tcp_destroy_io_queues(ctrl, new); } destroy_admin: nvme_quiesce_admin_queue(ctrl); blk_sync_queue(ctrl->admin_q); nvme_tcp_stop_queue(ctrl, 0); nvme_cancel_admin_tagset(ctrl); nvme_tcp_destroy_admin_queue(ctrl, new); return ret; } static void nvme_tcp_reconnect_ctrl_work(struct work_struct *work) { struct nvme_tcp_ctrl *tcp_ctrl = container_of(to_delayed_work(work), struct nvme_tcp_ctrl, connect_work); struct nvme_ctrl *ctrl = &tcp_ctrl->ctrl; ++ctrl->nr_reconnects; if (nvme_tcp_setup_ctrl(ctrl, false)) goto requeue; dev_info(ctrl->device, "Successfully reconnected (%d attempt)\n", ctrl->nr_reconnects); ctrl->nr_reconnects = 0; return; requeue: dev_info(ctrl->device, "Failed reconnect attempt %d\n", ctrl->nr_reconnects); nvme_tcp_reconnect_or_remove(ctrl); } static void nvme_tcp_error_recovery_work(struct work_struct *work) { struct nvme_tcp_ctrl *tcp_ctrl = container_of(work, struct nvme_tcp_ctrl, err_work); struct nvme_ctrl *ctrl = &tcp_ctrl->ctrl; nvme_stop_keep_alive(ctrl); flush_work(&ctrl->async_event_work); nvme_tcp_teardown_io_queues(ctrl, false); /* unquiesce to fail fast pending requests */ nvme_unquiesce_io_queues(ctrl); nvme_tcp_teardown_admin_queue(ctrl, false); nvme_unquiesce_admin_queue(ctrl); nvme_auth_stop(ctrl); if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_CONNECTING)) { /* state change failure is ok if we started ctrl delete */ WARN_ON_ONCE(ctrl->state != NVME_CTRL_DELETING && ctrl->state != NVME_CTRL_DELETING_NOIO); return; } nvme_tcp_reconnect_or_remove(ctrl); } static void nvme_tcp_teardown_ctrl(struct nvme_ctrl *ctrl, bool shutdown) { nvme_tcp_teardown_io_queues(ctrl, shutdown); nvme_quiesce_admin_queue(ctrl); nvme_disable_ctrl(ctrl, shutdown); nvme_tcp_teardown_admin_queue(ctrl, shutdown); } static void nvme_tcp_delete_ctrl(struct nvme_ctrl *ctrl) { nvme_tcp_teardown_ctrl(ctrl, true); } static void nvme_reset_ctrl_work(struct work_struct *work) { struct nvme_ctrl *ctrl = container_of(work, struct nvme_ctrl, reset_work); nvme_stop_ctrl(ctrl); nvme_tcp_teardown_ctrl(ctrl, false); if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_CONNECTING)) { /* state change failure is ok if we started ctrl delete */ WARN_ON_ONCE(ctrl->state != NVME_CTRL_DELETING && ctrl->state != NVME_CTRL_DELETING_NOIO); return; } if (nvme_tcp_setup_ctrl(ctrl, false)) 
goto out_fail; return; out_fail: ++ctrl->nr_reconnects; nvme_tcp_reconnect_or_remove(ctrl); } static void nvme_tcp_stop_ctrl(struct nvme_ctrl *ctrl) { flush_work(&to_tcp_ctrl(ctrl)->err_work); cancel_delayed_work_sync(&to_tcp_ctrl(ctrl)->connect_work); } static void nvme_tcp_free_ctrl(struct nvme_ctrl *nctrl) { struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl); if (list_empty(&ctrl->list)) goto free_ctrl; mutex_lock(&nvme_tcp_ctrl_mutex); list_del(&ctrl->list); mutex_unlock(&nvme_tcp_ctrl_mutex); nvmf_free_options(nctrl->opts); free_ctrl: kfree(ctrl->queues); kfree(ctrl); } static void nvme_tcp_set_sg_null(struct nvme_command *c) { struct nvme_sgl_desc *sg = &c->common.dptr.sgl; sg->addr = 0; sg->length = 0; sg->type = (NVME_TRANSPORT_SGL_DATA_DESC << 4) | NVME_SGL_FMT_TRANSPORT_A; } static void nvme_tcp_set_sg_inline(struct nvme_tcp_queue *queue, struct nvme_command *c, u32 data_len) { struct nvme_sgl_desc *sg = &c->common.dptr.sgl; sg->addr = cpu_to_le64(queue->ctrl->ctrl.icdoff); sg->length = cpu_to_le32(data_len); sg->type = (NVME_SGL_FMT_DATA_DESC << 4) | NVME_SGL_FMT_OFFSET; } static void nvme_tcp_set_sg_host_data(struct nvme_command *c, u32 data_len) { struct nvme_sgl_desc *sg = &c->common.dptr.sgl; sg->addr = 0; sg->length = cpu_to_le32(data_len); sg->type = (NVME_TRANSPORT_SGL_DATA_DESC << 4) | NVME_SGL_FMT_TRANSPORT_A; } static void nvme_tcp_submit_async_event(struct nvme_ctrl *arg) { struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(arg); struct nvme_tcp_queue *queue = &ctrl->queues[0]; struct nvme_tcp_cmd_pdu *pdu = ctrl->async_req.pdu; struct nvme_command *cmd = &pdu->cmd; u8 hdgst = nvme_tcp_hdgst_len(queue); memset(pdu, 0, sizeof(*pdu)); pdu->hdr.type = nvme_tcp_cmd; if (queue->hdr_digest) pdu->hdr.flags |= NVME_TCP_F_HDGST; pdu->hdr.hlen = sizeof(*pdu); pdu->hdr.plen = cpu_to_le32(pdu->hdr.hlen + hdgst); cmd->common.opcode = nvme_admin_async_event; cmd->common.command_id = NVME_AQ_BLK_MQ_DEPTH; cmd->common.flags |= NVME_CMD_SGL_METABUF; nvme_tcp_set_sg_null(cmd); ctrl->async_req.state = NVME_TCP_SEND_CMD_PDU; ctrl->async_req.offset = 0; ctrl->async_req.curr_bio = NULL; ctrl->async_req.data_len = 0; nvme_tcp_queue_request(&ctrl->async_req, true, true); } static void nvme_tcp_complete_timed_out(struct request *rq) { struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq); struct nvme_ctrl *ctrl = &req->queue->ctrl->ctrl; nvme_tcp_stop_queue(ctrl, nvme_tcp_queue_id(req->queue)); nvmf_complete_timed_out_request(rq); } static enum blk_eh_timer_return nvme_tcp_timeout(struct request *rq) { struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq); struct nvme_ctrl *ctrl = &req->queue->ctrl->ctrl; struct nvme_tcp_cmd_pdu *pdu = nvme_tcp_req_cmd_pdu(req); u8 opc = pdu->cmd.common.opcode, fctype = pdu->cmd.fabrics.fctype; int qid = nvme_tcp_queue_id(req->queue); dev_warn(ctrl->device, "queue %d: timeout cid %#x type %d opcode %#x (%s)\n", nvme_tcp_queue_id(req->queue), nvme_cid(rq), pdu->hdr.type, opc, nvme_opcode_str(qid, opc, fctype)); if (ctrl->state != NVME_CTRL_LIVE) { /* * If we are resetting, connecting or deleting we should * complete immediately because we may block controller * teardown or setup sequence * - ctrl disable/shutdown fabrics requests * - connect requests * - initialization admin requests * - I/O requests that entered after unquiescing and * the controller stopped responding * * All other requests should be cancelled by the error * recovery work, so it's fine that we fail it here. 
*/ nvme_tcp_complete_timed_out(rq); return BLK_EH_DONE; } /* * LIVE state should trigger the normal error recovery which will * handle completing this request. */ nvme_tcp_error_recovery(ctrl); return BLK_EH_RESET_TIMER; } static blk_status_t nvme_tcp_map_data(struct nvme_tcp_queue *queue, struct request *rq) { struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq); struct nvme_tcp_cmd_pdu *pdu = nvme_tcp_req_cmd_pdu(req); struct nvme_command *c = &pdu->cmd; c->common.flags |= NVME_CMD_SGL_METABUF; if (!blk_rq_nr_phys_segments(rq)) nvme_tcp_set_sg_null(c); else if (rq_data_dir(rq) == WRITE && req->data_len <= nvme_tcp_inline_data_size(req)) nvme_tcp_set_sg_inline(queue, c, req->data_len); else nvme_tcp_set_sg_host_data(c, req->data_len); return 0; } static blk_status_t nvme_tcp_setup_cmd_pdu(struct nvme_ns *ns, struct request *rq) { struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq); struct nvme_tcp_cmd_pdu *pdu = nvme_tcp_req_cmd_pdu(req); struct nvme_tcp_queue *queue = req->queue; u8 hdgst = nvme_tcp_hdgst_len(queue), ddgst = 0; blk_status_t ret; ret = nvme_setup_cmd(ns, rq); if (ret) return ret; req->state = NVME_TCP_SEND_CMD_PDU; req->status = cpu_to_le16(NVME_SC_SUCCESS); req->offset = 0; req->data_sent = 0; req->pdu_len = 0; req->pdu_sent = 0; req->h2cdata_left = 0; req->data_len = blk_rq_nr_phys_segments(rq) ? blk_rq_payload_bytes(rq) : 0; req->curr_bio = rq->bio; if (req->curr_bio && req->data_len) nvme_tcp_init_iter(req, rq_data_dir(rq)); if (rq_data_dir(rq) == WRITE && req->data_len <= nvme_tcp_inline_data_size(req)) req->pdu_len = req->data_len; pdu->hdr.type = nvme_tcp_cmd; pdu->hdr.flags = 0; if (queue->hdr_digest) pdu->hdr.flags |= NVME_TCP_F_HDGST; if (queue->data_digest && req->pdu_len) { pdu->hdr.flags |= NVME_TCP_F_DDGST; ddgst = nvme_tcp_ddgst_len(queue); } pdu->hdr.hlen = sizeof(*pdu); pdu->hdr.pdo = req->pdu_len ? 
pdu->hdr.hlen + hdgst : 0; pdu->hdr.plen = cpu_to_le32(pdu->hdr.hlen + hdgst + req->pdu_len + ddgst); ret = nvme_tcp_map_data(queue, rq); if (unlikely(ret)) { nvme_cleanup_cmd(rq); dev_err(queue->ctrl->ctrl.device, "Failed to map data (%d)\n", ret); return ret; } return 0; } static void nvme_tcp_commit_rqs(struct blk_mq_hw_ctx *hctx) { struct nvme_tcp_queue *queue = hctx->driver_data; if (!llist_empty(&queue->req_list)) queue_work_on(queue->io_cpu, nvme_tcp_wq, &queue->io_work); } static blk_status_t nvme_tcp_queue_rq(struct blk_mq_hw_ctx *hctx, const struct blk_mq_queue_data *bd) { struct nvme_ns *ns = hctx->queue->queuedata; struct nvme_tcp_queue *queue = hctx->driver_data; struct request *rq = bd->rq; struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq); bool queue_ready = test_bit(NVME_TCP_Q_LIVE, &queue->flags); blk_status_t ret; if (!nvme_check_ready(&queue->ctrl->ctrl, rq, queue_ready)) return nvme_fail_nonready_command(&queue->ctrl->ctrl, rq); ret = nvme_tcp_setup_cmd_pdu(ns, rq); if (unlikely(ret)) return ret; nvme_start_request(rq); nvme_tcp_queue_request(req, true, bd->last); return BLK_STS_OK; } static void nvme_tcp_map_queues(struct blk_mq_tag_set *set) { struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(set->driver_data); nvmf_map_queues(set, &ctrl->ctrl, ctrl->io_queues); } static int nvme_tcp_poll(struct blk_mq_hw_ctx *hctx, struct io_comp_batch *iob) { struct nvme_tcp_queue *queue = hctx->driver_data; struct sock *sk = queue->sock->sk; if (!test_bit(NVME_TCP_Q_LIVE, &queue->flags)) return 0; set_bit(NVME_TCP_Q_POLLING, &queue->flags); if (sk_can_busy_loop(sk) && skb_queue_empty_lockless(&sk->sk_receive_queue)) sk_busy_loop(sk, true); nvme_tcp_try_recv(queue); clear_bit(NVME_TCP_Q_POLLING, &queue->flags); return queue->nr_cqe; } static int nvme_tcp_get_address(struct nvme_ctrl *ctrl, char *buf, int size) { struct nvme_tcp_queue *queue = &to_tcp_ctrl(ctrl)->queues[0]; struct sockaddr_storage src_addr; int ret, len; len = nvmf_get_address(ctrl, buf, size); mutex_lock(&queue->queue_lock); if (!test_bit(NVME_TCP_Q_LIVE, &queue->flags)) goto done; ret = kernel_getsockname(queue->sock, (struct sockaddr *)&src_addr); if (ret > 0) { if (len > 0) len--; /* strip trailing newline */ len += scnprintf(buf + len, size - len, "%ssrc_addr=%pISc\n", (len) ? 
"," : "", &src_addr); } done: mutex_unlock(&queue->queue_lock); return len; } static const struct blk_mq_ops nvme_tcp_mq_ops = { .queue_rq = nvme_tcp_queue_rq, .commit_rqs = nvme_tcp_commit_rqs, .complete = nvme_complete_rq, .init_request = nvme_tcp_init_request, .exit_request = nvme_tcp_exit_request, .init_hctx = nvme_tcp_init_hctx, .timeout = nvme_tcp_timeout, .map_queues = nvme_tcp_map_queues, .poll = nvme_tcp_poll, }; static const struct blk_mq_ops nvme_tcp_admin_mq_ops = { .queue_rq = nvme_tcp_queue_rq, .complete = nvme_complete_rq, .init_request = nvme_tcp_init_request, .exit_request = nvme_tcp_exit_request, .init_hctx = nvme_tcp_init_admin_hctx, .timeout = nvme_tcp_timeout, }; static const struct nvme_ctrl_ops nvme_tcp_ctrl_ops = { .name = "tcp", .module = THIS_MODULE, .flags = NVME_F_FABRICS | NVME_F_BLOCKING, .reg_read32 = nvmf_reg_read32, .reg_read64 = nvmf_reg_read64, .reg_write32 = nvmf_reg_write32, .free_ctrl = nvme_tcp_free_ctrl, .submit_async_event = nvme_tcp_submit_async_event, .delete_ctrl = nvme_tcp_delete_ctrl, .get_address = nvme_tcp_get_address, .stop_ctrl = nvme_tcp_stop_ctrl, }; static bool nvme_tcp_existing_controller(struct nvmf_ctrl_options *opts) { struct nvme_tcp_ctrl *ctrl; bool found = false; mutex_lock(&nvme_tcp_ctrl_mutex); list_for_each_entry(ctrl, &nvme_tcp_ctrl_list, list) { found = nvmf_ip_options_match(&ctrl->ctrl, opts); if (found) break; } mutex_unlock(&nvme_tcp_ctrl_mutex); return found; } static struct nvme_ctrl *nvme_tcp_create_ctrl(struct device *dev, struct nvmf_ctrl_options *opts) { struct nvme_tcp_ctrl *ctrl; int ret; ctrl = kzalloc(sizeof(*ctrl), GFP_KERNEL); if (!ctrl) return ERR_PTR(-ENOMEM); INIT_LIST_HEAD(&ctrl->list); ctrl->ctrl.opts = opts; ctrl->ctrl.queue_count = opts->nr_io_queues + opts->nr_write_queues + opts->nr_poll_queues + 1; ctrl->ctrl.sqsize = opts->queue_size - 1; ctrl->ctrl.kato = opts->kato; INIT_DELAYED_WORK(&ctrl->connect_work, nvme_tcp_reconnect_ctrl_work); INIT_WORK(&ctrl->err_work, nvme_tcp_error_recovery_work); INIT_WORK(&ctrl->ctrl.reset_work, nvme_reset_ctrl_work); if (!(opts->mask & NVMF_OPT_TRSVCID)) { opts->trsvcid = kstrdup(__stringify(NVME_TCP_DISC_PORT), GFP_KERNEL); if (!opts->trsvcid) { ret = -ENOMEM; goto out_free_ctrl; } opts->mask |= NVMF_OPT_TRSVCID; } ret = inet_pton_with_scope(&init_net, AF_UNSPEC, opts->traddr, opts->trsvcid, &ctrl->addr); if (ret) { pr_err("malformed address passed: %s:%s\n", opts->traddr, opts->trsvcid); goto out_free_ctrl; } if (opts->mask & NVMF_OPT_HOST_TRADDR) { ret = inet_pton_with_scope(&init_net, AF_UNSPEC, opts->host_traddr, NULL, &ctrl->src_addr); if (ret) { pr_err("malformed src address passed: %s\n", opts->host_traddr); goto out_free_ctrl; } } if (opts->mask & NVMF_OPT_HOST_IFACE) { if (!__dev_get_by_name(&init_net, opts->host_iface)) { pr_err("invalid interface passed: %s\n", opts->host_iface); ret = -ENODEV; goto out_free_ctrl; } } if (!opts->duplicate_connect && nvme_tcp_existing_controller(opts)) { ret = -EALREADY; goto out_free_ctrl; } ctrl->queues = kcalloc(ctrl->ctrl.queue_count, sizeof(*ctrl->queues), GFP_KERNEL); if (!ctrl->queues) { ret = -ENOMEM; goto out_free_ctrl; } ret = nvme_init_ctrl(&ctrl->ctrl, dev, &nvme_tcp_ctrl_ops, 0); if (ret) goto out_kfree_queues; if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_CONNECTING)) { WARN_ON_ONCE(1); ret = -EINTR; goto out_uninit_ctrl; } ret = nvme_tcp_setup_ctrl(&ctrl->ctrl, true); if (ret) goto out_uninit_ctrl; dev_info(ctrl->ctrl.device, "new ctrl: NQN \"%s\", addr %pISp\n", nvmf_ctrl_subsysnqn(&ctrl->ctrl), 
&ctrl->addr); mutex_lock(&nvme_tcp_ctrl_mutex); list_add_tail(&ctrl->list, &nvme_tcp_ctrl_list); mutex_unlock(&nvme_tcp_ctrl_mutex); return &ctrl->ctrl; out_uninit_ctrl: nvme_uninit_ctrl(&ctrl->ctrl); nvme_put_ctrl(&ctrl->ctrl); if (ret > 0) ret = -EIO; return ERR_PTR(ret); out_kfree_queues: kfree(ctrl->queues); out_free_ctrl: kfree(ctrl); return ERR_PTR(ret); } static struct nvmf_transport_ops nvme_tcp_transport = { .name = "tcp", .module = THIS_MODULE, .required_opts = NVMF_OPT_TRADDR, .allowed_opts = NVMF_OPT_TRSVCID | NVMF_OPT_RECONNECT_DELAY | NVMF_OPT_HOST_TRADDR | NVMF_OPT_CTRL_LOSS_TMO | NVMF_OPT_HDR_DIGEST | NVMF_OPT_DATA_DIGEST | NVMF_OPT_NR_WRITE_QUEUES | NVMF_OPT_NR_POLL_QUEUES | NVMF_OPT_TOS | NVMF_OPT_HOST_IFACE, .create_ctrl = nvme_tcp_create_ctrl, }; static int __init nvme_tcp_init_module(void) { BUILD_BUG_ON(sizeof(struct nvme_tcp_hdr) != 8); BUILD_BUG_ON(sizeof(struct nvme_tcp_cmd_pdu) != 72); BUILD_BUG_ON(sizeof(struct nvme_tcp_data_pdu) != 24); BUILD_BUG_ON(sizeof(struct nvme_tcp_rsp_pdu) != 24); BUILD_BUG_ON(sizeof(struct nvme_tcp_r2t_pdu) != 24); BUILD_BUG_ON(sizeof(struct nvme_tcp_icreq_pdu) != 128); BUILD_BUG_ON(sizeof(struct nvme_tcp_icresp_pdu) != 128); BUILD_BUG_ON(sizeof(struct nvme_tcp_term_pdu) != 24); nvme_tcp_wq = alloc_workqueue("nvme_tcp_wq", WQ_MEM_RECLAIM | WQ_HIGHPRI, 0); if (!nvme_tcp_wq) return -ENOMEM; nvmf_register_transport(&nvme_tcp_transport); return 0; } static void __exit nvme_tcp_cleanup_module(void) { struct nvme_tcp_ctrl *ctrl; nvmf_unregister_transport(&nvme_tcp_transport); mutex_lock(&nvme_tcp_ctrl_mutex); list_for_each_entry(ctrl, &nvme_tcp_ctrl_list, list) nvme_delete_ctrl(&ctrl->ctrl); mutex_unlock(&nvme_tcp_ctrl_mutex); flush_workqueue(nvme_delete_wq); destroy_workqueue(nvme_tcp_wq); } module_init(nvme_tcp_init_module); module_exit(nvme_tcp_cleanup_module); MODULE_LICENSE("GPL v2");
linux-master
drivers/nvme/host/tcp.c
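/*
 * Illustrative sketch (not part of drivers/nvme/host/tcp.c above, and not
 * kernel code): a small userspace model of how the TCP transport classifies
 * queue IDs into default/read/poll groups and derives queue->io_cpu in
 * nvme_tcp_set_queue_io_cpu().  The real driver walks cpu_online_mask with
 * cpumask_next_wrap(); this model assumes the online CPUs are simply
 * 0..nr_online-1 and approximates the wrap with a modulo.  All counts,
 * names and CPU numbers below are invented for the example.
 */
#include <stdio.h>

enum qclass { QC_ADMIN, QC_DEFAULT, QC_READ, QC_POLL };

struct qmodel {
	int nr_default;	/* stands in for ctrl->io_queues[HCTX_TYPE_DEFAULT] */
	int nr_read;	/* stands in for ctrl->io_queues[HCTX_TYPE_READ] */
	int nr_poll;	/* stands in for ctrl->io_queues[HCTX_TYPE_POLL] */
	int nr_online;	/* stands in for the number of online CPUs */
};

static enum qclass classify(const struct qmodel *m, int qid)
{
	if (qid == 0)
		return QC_ADMIN;
	if (qid < 1 + m->nr_default)
		return QC_DEFAULT;
	if (qid < 1 + m->nr_default + m->nr_read)
		return QC_READ;
	return QC_POLL;
}

static int pick_io_cpu(const struct qmodel *m, int qid)
{
	int n = 0;

	switch (classify(m, qid)) {
	case QC_DEFAULT:
		n = qid - 1;
		break;
	case QC_READ:
		n = qid - m->nr_default - 1;
		break;
	case QC_POLL:
		n = qid - m->nr_default - m->nr_read - 1;
		break;
	default:
		n = 0;	/* admin queue lands on the first online CPU in this model */
		break;
	}
	return n % m->nr_online;	/* modulo approximation of the cpumask wrap */
}

int main(void)
{
	struct qmodel m = { .nr_default = 4, .nr_read = 2, .nr_poll = 2, .nr_online = 4 };
	int qid;

	/*
	 * With these made-up counts the default queues spread across CPUs
	 * 0-3, while the read and poll groups each restart at CPU 0.
	 */
	for (qid = 0; qid <= m.nr_default + m.nr_read + m.nr_poll; qid++)
		printf("qid %d -> class %d, io_cpu %d\n",
		       qid, classify(&m, qid), pick_io_cpu(&m, qid));
	return 0;
}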
// SPDX-License-Identifier: GPL-2.0 /* * Copyright (c) 2017-2018 Christoph Hellwig. */ #include <linux/backing-dev.h> #include <linux/moduleparam.h> #include <linux/vmalloc.h> #include <trace/events/block.h> #include "nvme.h" bool multipath = true; module_param(multipath, bool, 0444); MODULE_PARM_DESC(multipath, "turn on native support for multiple controllers per subsystem"); static const char *nvme_iopolicy_names[] = { [NVME_IOPOLICY_NUMA] = "numa", [NVME_IOPOLICY_RR] = "round-robin", }; static int iopolicy = NVME_IOPOLICY_NUMA; static int nvme_set_iopolicy(const char *val, const struct kernel_param *kp) { if (!val) return -EINVAL; if (!strncmp(val, "numa", 4)) iopolicy = NVME_IOPOLICY_NUMA; else if (!strncmp(val, "round-robin", 11)) iopolicy = NVME_IOPOLICY_RR; else return -EINVAL; return 0; } static int nvme_get_iopolicy(char *buf, const struct kernel_param *kp) { return sprintf(buf, "%s\n", nvme_iopolicy_names[iopolicy]); } module_param_call(iopolicy, nvme_set_iopolicy, nvme_get_iopolicy, &iopolicy, 0644); MODULE_PARM_DESC(iopolicy, "Default multipath I/O policy; 'numa' (default) or 'round-robin'"); void nvme_mpath_default_iopolicy(struct nvme_subsystem *subsys) { subsys->iopolicy = iopolicy; } void nvme_mpath_unfreeze(struct nvme_subsystem *subsys) { struct nvme_ns_head *h; lockdep_assert_held(&subsys->lock); list_for_each_entry(h, &subsys->nsheads, entry) if (h->disk) blk_mq_unfreeze_queue(h->disk->queue); } void nvme_mpath_wait_freeze(struct nvme_subsystem *subsys) { struct nvme_ns_head *h; lockdep_assert_held(&subsys->lock); list_for_each_entry(h, &subsys->nsheads, entry) if (h->disk) blk_mq_freeze_queue_wait(h->disk->queue); } void nvme_mpath_start_freeze(struct nvme_subsystem *subsys) { struct nvme_ns_head *h; lockdep_assert_held(&subsys->lock); list_for_each_entry(h, &subsys->nsheads, entry) if (h->disk) blk_freeze_queue_start(h->disk->queue); } void nvme_failover_req(struct request *req) { struct nvme_ns *ns = req->q->queuedata; u16 status = nvme_req(req)->status & 0x7ff; unsigned long flags; struct bio *bio; nvme_mpath_clear_current_path(ns); /* * If we got back an ANA error, we know the controller is alive but not * ready to serve this namespace. Kick of a re-read of the ANA * information page, and just try any other available path for now. */ if (nvme_is_ana_error(status) && ns->ctrl->ana_log_buf) { set_bit(NVME_NS_ANA_PENDING, &ns->flags); queue_work(nvme_wq, &ns->ctrl->ana_work); } spin_lock_irqsave(&ns->head->requeue_lock, flags); for (bio = req->bio; bio; bio = bio->bi_next) { bio_set_dev(bio, ns->head->disk->part0); if (bio->bi_opf & REQ_POLLED) { bio->bi_opf &= ~REQ_POLLED; bio->bi_cookie = BLK_QC_T_NONE; } /* * The alternate request queue that we may end up submitting * the bio to may be frozen temporarily, in this case REQ_NOWAIT * will fail the I/O immediately with EAGAIN to the issuer. * We are not in the issuer context which cannot block. Clear * the flag to avoid spurious EAGAIN I/O failures. 
*/ bio->bi_opf &= ~REQ_NOWAIT; } blk_steal_bios(&ns->head->requeue_list, req); spin_unlock_irqrestore(&ns->head->requeue_lock, flags); blk_mq_end_request(req, 0); kblockd_schedule_work(&ns->head->requeue_work); } void nvme_mpath_start_request(struct request *rq) { struct nvme_ns *ns = rq->q->queuedata; struct gendisk *disk = ns->head->disk; if (!blk_queue_io_stat(disk->queue) || blk_rq_is_passthrough(rq)) return; nvme_req(rq)->flags |= NVME_MPATH_IO_STATS; nvme_req(rq)->start_time = bdev_start_io_acct(disk->part0, req_op(rq), jiffies); } EXPORT_SYMBOL_GPL(nvme_mpath_start_request); void nvme_mpath_end_request(struct request *rq) { struct nvme_ns *ns = rq->q->queuedata; if (!(nvme_req(rq)->flags & NVME_MPATH_IO_STATS)) return; bdev_end_io_acct(ns->head->disk->part0, req_op(rq), blk_rq_bytes(rq) >> SECTOR_SHIFT, nvme_req(rq)->start_time); } void nvme_kick_requeue_lists(struct nvme_ctrl *ctrl) { struct nvme_ns *ns; down_read(&ctrl->namespaces_rwsem); list_for_each_entry(ns, &ctrl->namespaces, list) { if (!ns->head->disk) continue; kblockd_schedule_work(&ns->head->requeue_work); if (ctrl->state == NVME_CTRL_LIVE) disk_uevent(ns->head->disk, KOBJ_CHANGE); } up_read(&ctrl->namespaces_rwsem); } static const char *nvme_ana_state_names[] = { [0] = "invalid state", [NVME_ANA_OPTIMIZED] = "optimized", [NVME_ANA_NONOPTIMIZED] = "non-optimized", [NVME_ANA_INACCESSIBLE] = "inaccessible", [NVME_ANA_PERSISTENT_LOSS] = "persistent-loss", [NVME_ANA_CHANGE] = "change", }; bool nvme_mpath_clear_current_path(struct nvme_ns *ns) { struct nvme_ns_head *head = ns->head; bool changed = false; int node; if (!head) goto out; for_each_node(node) { if (ns == rcu_access_pointer(head->current_path[node])) { rcu_assign_pointer(head->current_path[node], NULL); changed = true; } } out: return changed; } void nvme_mpath_clear_ctrl_paths(struct nvme_ctrl *ctrl) { struct nvme_ns *ns; down_read(&ctrl->namespaces_rwsem); list_for_each_entry(ns, &ctrl->namespaces, list) { nvme_mpath_clear_current_path(ns); kblockd_schedule_work(&ns->head->requeue_work); } up_read(&ctrl->namespaces_rwsem); } void nvme_mpath_revalidate_paths(struct nvme_ns *ns) { struct nvme_ns_head *head = ns->head; sector_t capacity = get_capacity(head->disk); int node; int srcu_idx; srcu_idx = srcu_read_lock(&head->srcu); list_for_each_entry_rcu(ns, &head->list, siblings) { if (capacity != get_capacity(ns->disk)) clear_bit(NVME_NS_READY, &ns->flags); } srcu_read_unlock(&head->srcu, srcu_idx); for_each_node(node) rcu_assign_pointer(head->current_path[node], NULL); kblockd_schedule_work(&head->requeue_work); } static bool nvme_path_is_disabled(struct nvme_ns *ns) { /* * We don't treat NVME_CTRL_DELETING as a disabled path as I/O should * still be able to complete assuming that the controller is connected. * Otherwise it will fail immediately and return to the requeue list. 
*/ if (ns->ctrl->state != NVME_CTRL_LIVE && ns->ctrl->state != NVME_CTRL_DELETING) return true; if (test_bit(NVME_NS_ANA_PENDING, &ns->flags) || !test_bit(NVME_NS_READY, &ns->flags)) return true; return false; } static struct nvme_ns *__nvme_find_path(struct nvme_ns_head *head, int node) { int found_distance = INT_MAX, fallback_distance = INT_MAX, distance; struct nvme_ns *found = NULL, *fallback = NULL, *ns; list_for_each_entry_rcu(ns, &head->list, siblings) { if (nvme_path_is_disabled(ns)) continue; if (READ_ONCE(head->subsys->iopolicy) == NVME_IOPOLICY_NUMA) distance = node_distance(node, ns->ctrl->numa_node); else distance = LOCAL_DISTANCE; switch (ns->ana_state) { case NVME_ANA_OPTIMIZED: if (distance < found_distance) { found_distance = distance; found = ns; } break; case NVME_ANA_NONOPTIMIZED: if (distance < fallback_distance) { fallback_distance = distance; fallback = ns; } break; default: break; } } if (!found) found = fallback; if (found) rcu_assign_pointer(head->current_path[node], found); return found; } static struct nvme_ns *nvme_next_ns(struct nvme_ns_head *head, struct nvme_ns *ns) { ns = list_next_or_null_rcu(&head->list, &ns->siblings, struct nvme_ns, siblings); if (ns) return ns; return list_first_or_null_rcu(&head->list, struct nvme_ns, siblings); } static struct nvme_ns *nvme_round_robin_path(struct nvme_ns_head *head, int node, struct nvme_ns *old) { struct nvme_ns *ns, *found = NULL; if (list_is_singular(&head->list)) { if (nvme_path_is_disabled(old)) return NULL; return old; } for (ns = nvme_next_ns(head, old); ns && ns != old; ns = nvme_next_ns(head, ns)) { if (nvme_path_is_disabled(ns)) continue; if (ns->ana_state == NVME_ANA_OPTIMIZED) { found = ns; goto out; } if (ns->ana_state == NVME_ANA_NONOPTIMIZED) found = ns; } /* * The loop above skips the current path for round-robin semantics. * Fall back to the current path if either: * - no other optimized path found and current is optimized, * - no other usable path found and current is usable. 
*/ if (!nvme_path_is_disabled(old) && (old->ana_state == NVME_ANA_OPTIMIZED || (!found && old->ana_state == NVME_ANA_NONOPTIMIZED))) return old; if (!found) return NULL; out: rcu_assign_pointer(head->current_path[node], found); return found; } static inline bool nvme_path_is_optimized(struct nvme_ns *ns) { return ns->ctrl->state == NVME_CTRL_LIVE && ns->ana_state == NVME_ANA_OPTIMIZED; } inline struct nvme_ns *nvme_find_path(struct nvme_ns_head *head) { int node = numa_node_id(); struct nvme_ns *ns; ns = srcu_dereference(head->current_path[node], &head->srcu); if (unlikely(!ns)) return __nvme_find_path(head, node); if (READ_ONCE(head->subsys->iopolicy) == NVME_IOPOLICY_RR) return nvme_round_robin_path(head, node, ns); if (unlikely(!nvme_path_is_optimized(ns))) return __nvme_find_path(head, node); return ns; } static bool nvme_available_path(struct nvme_ns_head *head) { struct nvme_ns *ns; list_for_each_entry_rcu(ns, &head->list, siblings) { if (test_bit(NVME_CTRL_FAILFAST_EXPIRED, &ns->ctrl->flags)) continue; switch (ns->ctrl->state) { case NVME_CTRL_LIVE: case NVME_CTRL_RESETTING: case NVME_CTRL_CONNECTING: /* fallthru */ return true; default: break; } } return false; } static void nvme_ns_head_submit_bio(struct bio *bio) { struct nvme_ns_head *head = bio->bi_bdev->bd_disk->private_data; struct device *dev = disk_to_dev(head->disk); struct nvme_ns *ns; int srcu_idx; /* * The namespace might be going away and the bio might be moved to a * different queue via blk_steal_bios(), so we need to use the bio_split * pool from the original queue to allocate the bvecs from. */ bio = bio_split_to_limits(bio); if (!bio) return; srcu_idx = srcu_read_lock(&head->srcu); ns = nvme_find_path(head); if (likely(ns)) { bio_set_dev(bio, ns->disk->part0); bio->bi_opf |= REQ_NVME_MPATH; trace_block_bio_remap(bio, disk_devt(ns->head->disk), bio->bi_iter.bi_sector); submit_bio_noacct(bio); } else if (nvme_available_path(head)) { dev_warn_ratelimited(dev, "no usable path - requeuing I/O\n"); spin_lock_irq(&head->requeue_lock); bio_list_add(&head->requeue_list, bio); spin_unlock_irq(&head->requeue_lock); } else { dev_warn_ratelimited(dev, "no available path - failing I/O\n"); bio_io_error(bio); } srcu_read_unlock(&head->srcu, srcu_idx); } static int nvme_ns_head_open(struct gendisk *disk, blk_mode_t mode) { if (!nvme_tryget_ns_head(disk->private_data)) return -ENXIO; return 0; } static void nvme_ns_head_release(struct gendisk *disk) { nvme_put_ns_head(disk->private_data); } #ifdef CONFIG_BLK_DEV_ZONED static int nvme_ns_head_report_zones(struct gendisk *disk, sector_t sector, unsigned int nr_zones, report_zones_cb cb, void *data) { struct nvme_ns_head *head = disk->private_data; struct nvme_ns *ns; int srcu_idx, ret = -EWOULDBLOCK; srcu_idx = srcu_read_lock(&head->srcu); ns = nvme_find_path(head); if (ns) ret = nvme_ns_report_zones(ns, sector, nr_zones, cb, data); srcu_read_unlock(&head->srcu, srcu_idx); return ret; } #else #define nvme_ns_head_report_zones NULL #endif /* CONFIG_BLK_DEV_ZONED */ const struct block_device_operations nvme_ns_head_ops = { .owner = THIS_MODULE, .submit_bio = nvme_ns_head_submit_bio, .open = nvme_ns_head_open, .release = nvme_ns_head_release, .ioctl = nvme_ns_head_ioctl, .compat_ioctl = blkdev_compat_ptr_ioctl, .getgeo = nvme_getgeo, .report_zones = nvme_ns_head_report_zones, .pr_ops = &nvme_pr_ops, }; static inline struct nvme_ns_head *cdev_to_ns_head(struct cdev *cdev) { return container_of(cdev, struct nvme_ns_head, cdev); } static int nvme_ns_head_chr_open(struct inode *inode, 
struct file *file) { if (!nvme_tryget_ns_head(cdev_to_ns_head(inode->i_cdev))) return -ENXIO; return 0; } static int nvme_ns_head_chr_release(struct inode *inode, struct file *file) { nvme_put_ns_head(cdev_to_ns_head(inode->i_cdev)); return 0; } static const struct file_operations nvme_ns_head_chr_fops = { .owner = THIS_MODULE, .open = nvme_ns_head_chr_open, .release = nvme_ns_head_chr_release, .unlocked_ioctl = nvme_ns_head_chr_ioctl, .compat_ioctl = compat_ptr_ioctl, .uring_cmd = nvme_ns_head_chr_uring_cmd, .uring_cmd_iopoll = nvme_ns_chr_uring_cmd_iopoll, }; static int nvme_add_ns_head_cdev(struct nvme_ns_head *head) { int ret; head->cdev_device.parent = &head->subsys->dev; ret = dev_set_name(&head->cdev_device, "ng%dn%d", head->subsys->instance, head->instance); if (ret) return ret; ret = nvme_cdev_add(&head->cdev, &head->cdev_device, &nvme_ns_head_chr_fops, THIS_MODULE); return ret; } static void nvme_requeue_work(struct work_struct *work) { struct nvme_ns_head *head = container_of(work, struct nvme_ns_head, requeue_work); struct bio *bio, *next; spin_lock_irq(&head->requeue_lock); next = bio_list_get(&head->requeue_list); spin_unlock_irq(&head->requeue_lock); while ((bio = next) != NULL) { next = bio->bi_next; bio->bi_next = NULL; submit_bio_noacct(bio); } } int nvme_mpath_alloc_disk(struct nvme_ctrl *ctrl, struct nvme_ns_head *head) { bool vwc = false; mutex_init(&head->lock); bio_list_init(&head->requeue_list); spin_lock_init(&head->requeue_lock); INIT_WORK(&head->requeue_work, nvme_requeue_work); /* * Add a multipath node if the subsystems supports multiple controllers. * We also do this for private namespaces as the namespace sharing flag * could change after a rescan. */ if (!(ctrl->subsys->cmic & NVME_CTRL_CMIC_MULTI_CTRL) || !nvme_is_unique_nsid(ctrl, head) || !multipath) return 0; head->disk = blk_alloc_disk(ctrl->numa_node); if (!head->disk) return -ENOMEM; head->disk->fops = &nvme_ns_head_ops; head->disk->private_data = head; sprintf(head->disk->disk_name, "nvme%dn%d", ctrl->subsys->instance, head->instance); blk_queue_flag_set(QUEUE_FLAG_NONROT, head->disk->queue); blk_queue_flag_set(QUEUE_FLAG_NOWAIT, head->disk->queue); blk_queue_flag_set(QUEUE_FLAG_IO_STAT, head->disk->queue); /* * This assumes all controllers that refer to a namespace either * support poll queues or not. That is not a strict guarantee, * but if the assumption is wrong the effect is only suboptimal * performance but not correctness problem. */ if (ctrl->tagset->nr_maps > HCTX_TYPE_POLL && ctrl->tagset->map[HCTX_TYPE_POLL].nr_queues) blk_queue_flag_set(QUEUE_FLAG_POLL, head->disk->queue); /* set to a default value of 512 until the disk is validated */ blk_queue_logical_block_size(head->disk->queue, 512); blk_set_stacking_limits(&head->disk->queue->limits); blk_queue_dma_alignment(head->disk->queue, 3); /* we need to propagate up the VMC settings */ if (ctrl->vwc & NVME_CTRL_VWC_PRESENT) vwc = true; blk_queue_write_cache(head->disk->queue, vwc, vwc); return 0; } static void nvme_mpath_set_live(struct nvme_ns *ns) { struct nvme_ns_head *head = ns->head; int rc; if (!head->disk) return; /* * test_and_set_bit() is used because it is protecting against two nvme * paths simultaneously calling device_add_disk() on the same namespace * head. 
*/ if (!test_and_set_bit(NVME_NSHEAD_DISK_LIVE, &head->flags)) { rc = device_add_disk(&head->subsys->dev, head->disk, nvme_ns_id_attr_groups); if (rc) { clear_bit(NVME_NSHEAD_DISK_LIVE, &ns->flags); return; } nvme_add_ns_head_cdev(head); } mutex_lock(&head->lock); if (nvme_path_is_optimized(ns)) { int node, srcu_idx; srcu_idx = srcu_read_lock(&head->srcu); for_each_node(node) __nvme_find_path(head, node); srcu_read_unlock(&head->srcu, srcu_idx); } mutex_unlock(&head->lock); synchronize_srcu(&head->srcu); kblockd_schedule_work(&head->requeue_work); } static int nvme_parse_ana_log(struct nvme_ctrl *ctrl, void *data, int (*cb)(struct nvme_ctrl *ctrl, struct nvme_ana_group_desc *, void *)) { void *base = ctrl->ana_log_buf; size_t offset = sizeof(struct nvme_ana_rsp_hdr); int error, i; lockdep_assert_held(&ctrl->ana_lock); for (i = 0; i < le16_to_cpu(ctrl->ana_log_buf->ngrps); i++) { struct nvme_ana_group_desc *desc = base + offset; u32 nr_nsids; size_t nsid_buf_size; if (WARN_ON_ONCE(offset > ctrl->ana_log_size - sizeof(*desc))) return -EINVAL; nr_nsids = le32_to_cpu(desc->nnsids); nsid_buf_size = flex_array_size(desc, nsids, nr_nsids); if (WARN_ON_ONCE(desc->grpid == 0)) return -EINVAL; if (WARN_ON_ONCE(le32_to_cpu(desc->grpid) > ctrl->anagrpmax)) return -EINVAL; if (WARN_ON_ONCE(desc->state == 0)) return -EINVAL; if (WARN_ON_ONCE(desc->state > NVME_ANA_CHANGE)) return -EINVAL; offset += sizeof(*desc); if (WARN_ON_ONCE(offset > ctrl->ana_log_size - nsid_buf_size)) return -EINVAL; error = cb(ctrl, desc, data); if (error) return error; offset += nsid_buf_size; } return 0; } static inline bool nvme_state_is_live(enum nvme_ana_state state) { return state == NVME_ANA_OPTIMIZED || state == NVME_ANA_NONOPTIMIZED; } static void nvme_update_ns_ana_state(struct nvme_ana_group_desc *desc, struct nvme_ns *ns) { ns->ana_grpid = le32_to_cpu(desc->grpid); ns->ana_state = desc->state; clear_bit(NVME_NS_ANA_PENDING, &ns->flags); /* * nvme_mpath_set_live() will trigger I/O to the multipath path device * and in turn to this path device. However we cannot accept this I/O * if the controller is not live. This may deadlock if called from * nvme_mpath_init_identify() and the ctrl will never complete * initialization, preventing I/O from completing. For this case we * will reprocess the ANA log page in nvme_mpath_update() once the * controller is ready. 
*/ if (nvme_state_is_live(ns->ana_state) && ns->ctrl->state == NVME_CTRL_LIVE) nvme_mpath_set_live(ns); } static int nvme_update_ana_state(struct nvme_ctrl *ctrl, struct nvme_ana_group_desc *desc, void *data) { u32 nr_nsids = le32_to_cpu(desc->nnsids), n = 0; unsigned *nr_change_groups = data; struct nvme_ns *ns; dev_dbg(ctrl->device, "ANA group %d: %s.\n", le32_to_cpu(desc->grpid), nvme_ana_state_names[desc->state]); if (desc->state == NVME_ANA_CHANGE) (*nr_change_groups)++; if (!nr_nsids) return 0; down_read(&ctrl->namespaces_rwsem); list_for_each_entry(ns, &ctrl->namespaces, list) { unsigned nsid; again: nsid = le32_to_cpu(desc->nsids[n]); if (ns->head->ns_id < nsid) continue; if (ns->head->ns_id == nsid) nvme_update_ns_ana_state(desc, ns); if (++n == nr_nsids) break; if (ns->head->ns_id > nsid) goto again; } up_read(&ctrl->namespaces_rwsem); return 0; } static int nvme_read_ana_log(struct nvme_ctrl *ctrl) { u32 nr_change_groups = 0; int error; mutex_lock(&ctrl->ana_lock); error = nvme_get_log(ctrl, NVME_NSID_ALL, NVME_LOG_ANA, 0, NVME_CSI_NVM, ctrl->ana_log_buf, ctrl->ana_log_size, 0); if (error) { dev_warn(ctrl->device, "Failed to get ANA log: %d\n", error); goto out_unlock; } error = nvme_parse_ana_log(ctrl, &nr_change_groups, nvme_update_ana_state); if (error) goto out_unlock; /* * In theory we should have an ANATT timer per group as they might enter * the change state at different times. But that is a lot of overhead * just to protect against a target that keeps entering new changes * states while never finishing previous ones. But we'll still * eventually time out once all groups are in change state, so this * isn't a big deal. * * We also double the ANATT value to provide some slack for transports * or AEN processing overhead. */ if (nr_change_groups) mod_timer(&ctrl->anatt_timer, ctrl->anatt * HZ * 2 + jiffies); else del_timer_sync(&ctrl->anatt_timer); out_unlock: mutex_unlock(&ctrl->ana_lock); return error; } static void nvme_ana_work(struct work_struct *work) { struct nvme_ctrl *ctrl = container_of(work, struct nvme_ctrl, ana_work); if (ctrl->state != NVME_CTRL_LIVE) return; nvme_read_ana_log(ctrl); } void nvme_mpath_update(struct nvme_ctrl *ctrl) { u32 nr_change_groups = 0; if (!ctrl->ana_log_buf) return; mutex_lock(&ctrl->ana_lock); nvme_parse_ana_log(ctrl, &nr_change_groups, nvme_update_ana_state); mutex_unlock(&ctrl->ana_lock); } static void nvme_anatt_timeout(struct timer_list *t) { struct nvme_ctrl *ctrl = from_timer(ctrl, t, anatt_timer); dev_info(ctrl->device, "ANATT timeout, resetting controller.\n"); nvme_reset_ctrl(ctrl); } void nvme_mpath_stop(struct nvme_ctrl *ctrl) { if (!nvme_ctrl_use_ana(ctrl)) return; del_timer_sync(&ctrl->anatt_timer); cancel_work_sync(&ctrl->ana_work); } #define SUBSYS_ATTR_RW(_name, _mode, _show, _store) \ struct device_attribute subsys_attr_##_name = \ __ATTR(_name, _mode, _show, _store) static ssize_t nvme_subsys_iopolicy_show(struct device *dev, struct device_attribute *attr, char *buf) { struct nvme_subsystem *subsys = container_of(dev, struct nvme_subsystem, dev); return sysfs_emit(buf, "%s\n", nvme_iopolicy_names[READ_ONCE(subsys->iopolicy)]); } static ssize_t nvme_subsys_iopolicy_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct nvme_subsystem *subsys = container_of(dev, struct nvme_subsystem, dev); int i; for (i = 0; i < ARRAY_SIZE(nvme_iopolicy_names); i++) { if (sysfs_streq(buf, nvme_iopolicy_names[i])) { WRITE_ONCE(subsys->iopolicy, i); return count; } } return -EINVAL; } 
SUBSYS_ATTR_RW(iopolicy, S_IRUGO | S_IWUSR, nvme_subsys_iopolicy_show, nvme_subsys_iopolicy_store); static ssize_t ana_grpid_show(struct device *dev, struct device_attribute *attr, char *buf) { return sysfs_emit(buf, "%d\n", nvme_get_ns_from_dev(dev)->ana_grpid); } DEVICE_ATTR_RO(ana_grpid); static ssize_t ana_state_show(struct device *dev, struct device_attribute *attr, char *buf) { struct nvme_ns *ns = nvme_get_ns_from_dev(dev); return sysfs_emit(buf, "%s\n", nvme_ana_state_names[ns->ana_state]); } DEVICE_ATTR_RO(ana_state); static int nvme_lookup_ana_group_desc(struct nvme_ctrl *ctrl, struct nvme_ana_group_desc *desc, void *data) { struct nvme_ana_group_desc *dst = data; if (desc->grpid != dst->grpid) return 0; *dst = *desc; return -ENXIO; /* just break out of the loop */ } void nvme_mpath_add_disk(struct nvme_ns *ns, __le32 anagrpid) { if (nvme_ctrl_use_ana(ns->ctrl)) { struct nvme_ana_group_desc desc = { .grpid = anagrpid, .state = 0, }; mutex_lock(&ns->ctrl->ana_lock); ns->ana_grpid = le32_to_cpu(anagrpid); nvme_parse_ana_log(ns->ctrl, &desc, nvme_lookup_ana_group_desc); mutex_unlock(&ns->ctrl->ana_lock); if (desc.state) { /* found the group desc: update */ nvme_update_ns_ana_state(&desc, ns); } else { /* group desc not found: trigger a re-read */ set_bit(NVME_NS_ANA_PENDING, &ns->flags); queue_work(nvme_wq, &ns->ctrl->ana_work); } } else { ns->ana_state = NVME_ANA_OPTIMIZED; nvme_mpath_set_live(ns); } if (blk_queue_stable_writes(ns->queue) && ns->head->disk) blk_queue_flag_set(QUEUE_FLAG_STABLE_WRITES, ns->head->disk->queue); #ifdef CONFIG_BLK_DEV_ZONED if (blk_queue_is_zoned(ns->queue) && ns->head->disk) ns->head->disk->nr_zones = ns->disk->nr_zones; #endif } void nvme_mpath_shutdown_disk(struct nvme_ns_head *head) { if (!head->disk) return; kblockd_schedule_work(&head->requeue_work); if (test_bit(NVME_NSHEAD_DISK_LIVE, &head->flags)) { nvme_cdev_del(&head->cdev, &head->cdev_device); del_gendisk(head->disk); } } void nvme_mpath_remove_disk(struct nvme_ns_head *head) { if (!head->disk) return; /* make sure all pending bios are cleaned up */ kblockd_schedule_work(&head->requeue_work); flush_work(&head->requeue_work); put_disk(head->disk); } void nvme_mpath_init_ctrl(struct nvme_ctrl *ctrl) { mutex_init(&ctrl->ana_lock); timer_setup(&ctrl->anatt_timer, nvme_anatt_timeout, 0); INIT_WORK(&ctrl->ana_work, nvme_ana_work); } int nvme_mpath_init_identify(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id) { size_t max_transfer_size = ctrl->max_hw_sectors << SECTOR_SHIFT; size_t ana_log_size; int error = 0; /* check if multipath is enabled and we have the capability */ if (!multipath || !ctrl->subsys || !(ctrl->subsys->cmic & NVME_CTRL_CMIC_ANA)) return 0; if (!ctrl->max_namespaces || ctrl->max_namespaces > le32_to_cpu(id->nn)) { dev_err(ctrl->device, "Invalid MNAN value %u\n", ctrl->max_namespaces); return -EINVAL; } ctrl->anacap = id->anacap; ctrl->anatt = id->anatt; ctrl->nanagrpid = le32_to_cpu(id->nanagrpid); ctrl->anagrpmax = le32_to_cpu(id->anagrpmax); ana_log_size = sizeof(struct nvme_ana_rsp_hdr) + ctrl->nanagrpid * sizeof(struct nvme_ana_group_desc) + ctrl->max_namespaces * sizeof(__le32); if (ana_log_size > max_transfer_size) { dev_err(ctrl->device, "ANA log page size (%zd) larger than MDTS (%zd).\n", ana_log_size, max_transfer_size); dev_err(ctrl->device, "disabling ANA support.\n"); goto out_uninit; } if (ana_log_size > ctrl->ana_log_size) { nvme_mpath_stop(ctrl); nvme_mpath_uninit(ctrl); ctrl->ana_log_buf = kvmalloc(ana_log_size, GFP_KERNEL); if (!ctrl->ana_log_buf) return 
-ENOMEM; } ctrl->ana_log_size = ana_log_size; error = nvme_read_ana_log(ctrl); if (error) goto out_uninit; return 0; out_uninit: nvme_mpath_uninit(ctrl); return error; } void nvme_mpath_uninit(struct nvme_ctrl *ctrl) { kvfree(ctrl->ana_log_buf); ctrl->ana_log_buf = NULL; ctrl->ana_log_size = 0; }
linux-master
drivers/nvme/host/multipath.c
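/*
 * Illustrative sketch (not part of drivers/nvme/host/multipath.c above, and
 * not kernel code): a userspace model of the round-robin selection done by
 * nvme_round_robin_path().  Paths live in a plain array instead of an RCU
 * list, and ANA/controller state is collapsed into a single enum; the path
 * names and states are invented for the example.  The decision shape is the
 * same: walk the other paths in ring order, take the first optimized one,
 * remember a usable non-optimized one as fallback, and only then consider
 * reusing the previous path.
 */
#include <stdio.h>

enum pstate { P_OPTIMIZED, P_NONOPTIMIZED, P_DISABLED };

struct path {
	const char *name;
	enum pstate state;
};

/* Returns the index of the chosen path, or -1 if nothing is usable. */
static int round_robin_pick(const struct path *p, int n, int old)
{
	int fallback = -1;
	int i, idx;

	for (i = 1; i < n; i++) {		/* every path except 'old' */
		idx = (old + i) % n;
		if (p[idx].state == P_DISABLED)
			continue;
		if (p[idx].state == P_OPTIMIZED)
			return idx;
		fallback = idx;			/* last usable non-optimized seen */
	}

	/* Fall back to the previous path only if it is still attractive. */
	if (p[old].state == P_OPTIMIZED ||
	    (fallback < 0 && p[old].state == P_NONOPTIMIZED))
		return old;

	return fallback;			/* may be -1: requeue or fail the I/O */
}

int main(void)
{
	struct path paths[] = {
		{ "ctrl0", P_OPTIMIZED },
		{ "ctrl1", P_DISABLED },
		{ "ctrl2", P_OPTIMIZED },
	};
	int cur = 0, next, i;

	/* Alternates ctrl2, ctrl0, ctrl2, ... while skipping the disabled path. */
	for (i = 0; i < 4; i++) {
		next = round_robin_pick(paths, 3, cur);
		printf("I/O %d -> %s\n", i, next >= 0 ? paths[next].name : "<none>");
		if (next >= 0)
			cur = next;
	}
	return 0;
}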
// SPDX-License-Identifier: GPL-2.0 /* * Copyright (c) 2016 Avago Technologies. All rights reserved. */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/module.h> #include <linux/parser.h> #include <uapi/scsi/fc/fc_fs.h> #include <uapi/scsi/fc/fc_els.h> #include <linux/delay.h> #include <linux/overflow.h> #include <linux/blk-cgroup.h> #include "nvme.h" #include "fabrics.h" #include <linux/nvme-fc-driver.h> #include <linux/nvme-fc.h> #include "fc.h" #include <scsi/scsi_transport_fc.h> #include <linux/blk-mq-pci.h> /* *************************** Data Structures/Defines ****************** */ enum nvme_fc_queue_flags { NVME_FC_Q_CONNECTED = 0, NVME_FC_Q_LIVE, }; #define NVME_FC_DEFAULT_DEV_LOSS_TMO 60 /* seconds */ #define NVME_FC_DEFAULT_RECONNECT_TMO 2 /* delay between reconnects * when connected and a * connection failure. */ struct nvme_fc_queue { struct nvme_fc_ctrl *ctrl; struct device *dev; struct blk_mq_hw_ctx *hctx; void *lldd_handle; size_t cmnd_capsule_len; u32 qnum; u32 rqcnt; u32 seqno; u64 connection_id; atomic_t csn; unsigned long flags; } __aligned(sizeof(u64)); /* alignment for other things alloc'd with */ enum nvme_fcop_flags { FCOP_FLAGS_TERMIO = (1 << 0), FCOP_FLAGS_AEN = (1 << 1), }; struct nvmefc_ls_req_op { struct nvmefc_ls_req ls_req; struct nvme_fc_rport *rport; struct nvme_fc_queue *queue; struct request *rq; u32 flags; int ls_error; struct completion ls_done; struct list_head lsreq_list; /* rport->ls_req_list */ bool req_queued; }; struct nvmefc_ls_rcv_op { struct nvme_fc_rport *rport; struct nvmefc_ls_rsp *lsrsp; union nvmefc_ls_requests *rqstbuf; union nvmefc_ls_responses *rspbuf; u16 rqstdatalen; bool handled; dma_addr_t rspdma; struct list_head lsrcv_list; /* rport->ls_rcv_list */ } __aligned(sizeof(u64)); /* alignment for other things alloc'd with */ enum nvme_fcpop_state { FCPOP_STATE_UNINIT = 0, FCPOP_STATE_IDLE = 1, FCPOP_STATE_ACTIVE = 2, FCPOP_STATE_ABORTED = 3, FCPOP_STATE_COMPLETE = 4, }; struct nvme_fc_fcp_op { struct nvme_request nreq; /* * nvme/host/core.c * requires this to be * the 1st element in the * private structure * associated with the * request. 
*/ struct nvmefc_fcp_req fcp_req; struct nvme_fc_ctrl *ctrl; struct nvme_fc_queue *queue; struct request *rq; atomic_t state; u32 flags; u32 rqno; u32 nents; struct nvme_fc_cmd_iu cmd_iu; struct nvme_fc_ersp_iu rsp_iu; }; struct nvme_fcp_op_w_sgl { struct nvme_fc_fcp_op op; struct scatterlist sgl[NVME_INLINE_SG_CNT]; uint8_t priv[]; }; struct nvme_fc_lport { struct nvme_fc_local_port localport; struct ida endp_cnt; struct list_head port_list; /* nvme_fc_port_list */ struct list_head endp_list; struct device *dev; /* physical device for dma */ struct nvme_fc_port_template *ops; struct kref ref; atomic_t act_rport_cnt; } __aligned(sizeof(u64)); /* alignment for other things alloc'd with */ struct nvme_fc_rport { struct nvme_fc_remote_port remoteport; struct list_head endp_list; /* for lport->endp_list */ struct list_head ctrl_list; struct list_head ls_req_list; struct list_head ls_rcv_list; struct list_head disc_list; struct device *dev; /* physical device for dma */ struct nvme_fc_lport *lport; spinlock_t lock; struct kref ref; atomic_t act_ctrl_cnt; unsigned long dev_loss_end; struct work_struct lsrcv_work; } __aligned(sizeof(u64)); /* alignment for other things alloc'd with */ /* fc_ctrl flags values - specified as bit positions */ #define ASSOC_ACTIVE 0 #define ASSOC_FAILED 1 #define FCCTRL_TERMIO 2 struct nvme_fc_ctrl { spinlock_t lock; struct nvme_fc_queue *queues; struct device *dev; struct nvme_fc_lport *lport; struct nvme_fc_rport *rport; u32 cnum; bool ioq_live; u64 association_id; struct nvmefc_ls_rcv_op *rcv_disconn; struct list_head ctrl_list; /* rport->ctrl_list */ struct blk_mq_tag_set admin_tag_set; struct blk_mq_tag_set tag_set; struct work_struct ioerr_work; struct delayed_work connect_work; struct kref ref; unsigned long flags; u32 iocnt; wait_queue_head_t ioabort_wait; struct nvme_fc_fcp_op aen_ops[NVME_NR_AEN_COMMANDS]; struct nvme_ctrl ctrl; }; static inline struct nvme_fc_ctrl * to_fc_ctrl(struct nvme_ctrl *ctrl) { return container_of(ctrl, struct nvme_fc_ctrl, ctrl); } static inline struct nvme_fc_lport * localport_to_lport(struct nvme_fc_local_port *portptr) { return container_of(portptr, struct nvme_fc_lport, localport); } static inline struct nvme_fc_rport * remoteport_to_rport(struct nvme_fc_remote_port *portptr) { return container_of(portptr, struct nvme_fc_rport, remoteport); } static inline struct nvmefc_ls_req_op * ls_req_to_lsop(struct nvmefc_ls_req *lsreq) { return container_of(lsreq, struct nvmefc_ls_req_op, ls_req); } static inline struct nvme_fc_fcp_op * fcp_req_to_fcp_op(struct nvmefc_fcp_req *fcpreq) { return container_of(fcpreq, struct nvme_fc_fcp_op, fcp_req); } /* *************************** Globals **************************** */ static DEFINE_SPINLOCK(nvme_fc_lock); static LIST_HEAD(nvme_fc_lport_list); static DEFINE_IDA(nvme_fc_local_port_cnt); static DEFINE_IDA(nvme_fc_ctrl_cnt); static struct workqueue_struct *nvme_fc_wq; static bool nvme_fc_waiting_to_unload; static DECLARE_COMPLETION(nvme_fc_unload_proceed); /* * These items are short-term. They will eventually be moved into * a generic FC class. See comments in module init. 
*/ static struct device *fc_udev_device; static void nvme_fc_complete_rq(struct request *rq); /* *********************** FC-NVME Port Management ************************ */ static void __nvme_fc_delete_hw_queue(struct nvme_fc_ctrl *, struct nvme_fc_queue *, unsigned int); static void nvme_fc_handle_ls_rqst_work(struct work_struct *work); static void nvme_fc_free_lport(struct kref *ref) { struct nvme_fc_lport *lport = container_of(ref, struct nvme_fc_lport, ref); unsigned long flags; WARN_ON(lport->localport.port_state != FC_OBJSTATE_DELETED); WARN_ON(!list_empty(&lport->endp_list)); /* remove from transport list */ spin_lock_irqsave(&nvme_fc_lock, flags); list_del(&lport->port_list); if (nvme_fc_waiting_to_unload && list_empty(&nvme_fc_lport_list)) complete(&nvme_fc_unload_proceed); spin_unlock_irqrestore(&nvme_fc_lock, flags); ida_free(&nvme_fc_local_port_cnt, lport->localport.port_num); ida_destroy(&lport->endp_cnt); put_device(lport->dev); kfree(lport); } static void nvme_fc_lport_put(struct nvme_fc_lport *lport) { kref_put(&lport->ref, nvme_fc_free_lport); } static int nvme_fc_lport_get(struct nvme_fc_lport *lport) { return kref_get_unless_zero(&lport->ref); } static struct nvme_fc_lport * nvme_fc_attach_to_unreg_lport(struct nvme_fc_port_info *pinfo, struct nvme_fc_port_template *ops, struct device *dev) { struct nvme_fc_lport *lport; unsigned long flags; spin_lock_irqsave(&nvme_fc_lock, flags); list_for_each_entry(lport, &nvme_fc_lport_list, port_list) { if (lport->localport.node_name != pinfo->node_name || lport->localport.port_name != pinfo->port_name) continue; if (lport->dev != dev) { lport = ERR_PTR(-EXDEV); goto out_done; } if (lport->localport.port_state != FC_OBJSTATE_DELETED) { lport = ERR_PTR(-EEXIST); goto out_done; } if (!nvme_fc_lport_get(lport)) { /* * fails if ref cnt already 0. If so, * act as if lport already deleted */ lport = NULL; goto out_done; } /* resume the lport */ lport->ops = ops; lport->localport.port_role = pinfo->port_role; lport->localport.port_id = pinfo->port_id; lport->localport.port_state = FC_OBJSTATE_ONLINE; spin_unlock_irqrestore(&nvme_fc_lock, flags); return lport; } lport = NULL; out_done: spin_unlock_irqrestore(&nvme_fc_lock, flags); return lport; } /** * nvme_fc_register_localport - transport entry point called by an * LLDD to register the existence of a NVME * host FC port. * @pinfo: pointer to information about the port to be registered * @template: LLDD entrypoints and operational parameters for the port * @dev: physical hardware device node port corresponds to. Will be * used for DMA mappings * @portptr: pointer to a local port pointer. Upon success, the routine * will allocate a nvme_fc_local_port structure and place its * address in the local port pointer. Upon failure, local port * pointer will be set to 0. * * Returns: * a completion status. Must be 0 upon success; a negative errno * (ex: -ENXIO) upon failure. 
*/ int nvme_fc_register_localport(struct nvme_fc_port_info *pinfo, struct nvme_fc_port_template *template, struct device *dev, struct nvme_fc_local_port **portptr) { struct nvme_fc_lport *newrec; unsigned long flags; int ret, idx; if (!template->localport_delete || !template->remoteport_delete || !template->ls_req || !template->fcp_io || !template->ls_abort || !template->fcp_abort || !template->max_hw_queues || !template->max_sgl_segments || !template->max_dif_sgl_segments || !template->dma_boundary) { ret = -EINVAL; goto out_reghost_failed; } /* * look to see if there is already a localport that had been * deregistered and in the process of waiting for all the * references to fully be removed. If the references haven't * expired, we can simply re-enable the localport. Remoteports * and controller reconnections should resume naturally. */ newrec = nvme_fc_attach_to_unreg_lport(pinfo, template, dev); /* found an lport, but something about its state is bad */ if (IS_ERR(newrec)) { ret = PTR_ERR(newrec); goto out_reghost_failed; /* found existing lport, which was resumed */ } else if (newrec) { *portptr = &newrec->localport; return 0; } /* nothing found - allocate a new localport struct */ newrec = kmalloc((sizeof(*newrec) + template->local_priv_sz), GFP_KERNEL); if (!newrec) { ret = -ENOMEM; goto out_reghost_failed; } idx = ida_alloc(&nvme_fc_local_port_cnt, GFP_KERNEL); if (idx < 0) { ret = -ENOSPC; goto out_fail_kfree; } if (!get_device(dev) && dev) { ret = -ENODEV; goto out_ida_put; } INIT_LIST_HEAD(&newrec->port_list); INIT_LIST_HEAD(&newrec->endp_list); kref_init(&newrec->ref); atomic_set(&newrec->act_rport_cnt, 0); newrec->ops = template; newrec->dev = dev; ida_init(&newrec->endp_cnt); if (template->local_priv_sz) newrec->localport.private = &newrec[1]; else newrec->localport.private = NULL; newrec->localport.node_name = pinfo->node_name; newrec->localport.port_name = pinfo->port_name; newrec->localport.port_role = pinfo->port_role; newrec->localport.port_id = pinfo->port_id; newrec->localport.port_state = FC_OBJSTATE_ONLINE; newrec->localport.port_num = idx; spin_lock_irqsave(&nvme_fc_lock, flags); list_add_tail(&newrec->port_list, &nvme_fc_lport_list); spin_unlock_irqrestore(&nvme_fc_lock, flags); if (dev) dma_set_seg_boundary(dev, template->dma_boundary); *portptr = &newrec->localport; return 0; out_ida_put: ida_free(&nvme_fc_local_port_cnt, idx); out_fail_kfree: kfree(newrec); out_reghost_failed: *portptr = NULL; return ret; } EXPORT_SYMBOL_GPL(nvme_fc_register_localport); /** * nvme_fc_unregister_localport - transport entry point called by an * LLDD to deregister/remove a previously * registered a NVME host FC port. * @portptr: pointer to the (registered) local port that is to be deregistered. * * Returns: * a completion status. Must be 0 upon success; a negative errno * (ex: -ENXIO) upon failure. 
*/ int nvme_fc_unregister_localport(struct nvme_fc_local_port *portptr) { struct nvme_fc_lport *lport = localport_to_lport(portptr); unsigned long flags; if (!portptr) return -EINVAL; spin_lock_irqsave(&nvme_fc_lock, flags); if (portptr->port_state != FC_OBJSTATE_ONLINE) { spin_unlock_irqrestore(&nvme_fc_lock, flags); return -EINVAL; } portptr->port_state = FC_OBJSTATE_DELETED; spin_unlock_irqrestore(&nvme_fc_lock, flags); if (atomic_read(&lport->act_rport_cnt) == 0) lport->ops->localport_delete(&lport->localport); nvme_fc_lport_put(lport); return 0; } EXPORT_SYMBOL_GPL(nvme_fc_unregister_localport); /* * TRADDR strings, per FC-NVME are fixed format: * "nn-0x<16hexdigits>:pn-0x<16hexdigits>" - 43 characters * udev event will only differ by prefix of what field is * being specified: * "NVMEFC_HOST_TRADDR=" or "NVMEFC_TRADDR=" - 19 max characters * 19 + 43 + null_fudge = 64 characters */ #define FCNVME_TRADDR_LENGTH 64 static void nvme_fc_signal_discovery_scan(struct nvme_fc_lport *lport, struct nvme_fc_rport *rport) { char hostaddr[FCNVME_TRADDR_LENGTH]; /* NVMEFC_HOST_TRADDR=...*/ char tgtaddr[FCNVME_TRADDR_LENGTH]; /* NVMEFC_TRADDR=...*/ char *envp[4] = { "FC_EVENT=nvmediscovery", hostaddr, tgtaddr, NULL }; if (!(rport->remoteport.port_role & FC_PORT_ROLE_NVME_DISCOVERY)) return; snprintf(hostaddr, sizeof(hostaddr), "NVMEFC_HOST_TRADDR=nn-0x%016llx:pn-0x%016llx", lport->localport.node_name, lport->localport.port_name); snprintf(tgtaddr, sizeof(tgtaddr), "NVMEFC_TRADDR=nn-0x%016llx:pn-0x%016llx", rport->remoteport.node_name, rport->remoteport.port_name); kobject_uevent_env(&fc_udev_device->kobj, KOBJ_CHANGE, envp); } static void nvme_fc_free_rport(struct kref *ref) { struct nvme_fc_rport *rport = container_of(ref, struct nvme_fc_rport, ref); struct nvme_fc_lport *lport = localport_to_lport(rport->remoteport.localport); unsigned long flags; WARN_ON(rport->remoteport.port_state != FC_OBJSTATE_DELETED); WARN_ON(!list_empty(&rport->ctrl_list)); /* remove from lport list */ spin_lock_irqsave(&nvme_fc_lock, flags); list_del(&rport->endp_list); spin_unlock_irqrestore(&nvme_fc_lock, flags); WARN_ON(!list_empty(&rport->disc_list)); ida_free(&lport->endp_cnt, rport->remoteport.port_num); kfree(rport); nvme_fc_lport_put(lport); } static void nvme_fc_rport_put(struct nvme_fc_rport *rport) { kref_put(&rport->ref, nvme_fc_free_rport); } static int nvme_fc_rport_get(struct nvme_fc_rport *rport) { return kref_get_unless_zero(&rport->ref); } static void nvme_fc_resume_controller(struct nvme_fc_ctrl *ctrl) { switch (ctrl->ctrl.state) { case NVME_CTRL_NEW: case NVME_CTRL_CONNECTING: /* * As all reconnects were suppressed, schedule a * connect. */ dev_info(ctrl->ctrl.device, "NVME-FC{%d}: connectivity re-established. " "Attempting reconnect\n", ctrl->cnum); queue_delayed_work(nvme_wq, &ctrl->connect_work, 0); break; case NVME_CTRL_RESETTING: /* * Controller is already in the process of terminating the * association. No need to do anything further. The reconnect * step will naturally occur after the reset completes. 
*/ break; default: /* no action to take - let it delete */ break; } } static struct nvme_fc_rport * nvme_fc_attach_to_suspended_rport(struct nvme_fc_lport *lport, struct nvme_fc_port_info *pinfo) { struct nvme_fc_rport *rport; struct nvme_fc_ctrl *ctrl; unsigned long flags; spin_lock_irqsave(&nvme_fc_lock, flags); list_for_each_entry(rport, &lport->endp_list, endp_list) { if (rport->remoteport.node_name != pinfo->node_name || rport->remoteport.port_name != pinfo->port_name) continue; if (!nvme_fc_rport_get(rport)) { rport = ERR_PTR(-ENOLCK); goto out_done; } spin_unlock_irqrestore(&nvme_fc_lock, flags); spin_lock_irqsave(&rport->lock, flags); /* has it been unregistered */ if (rport->remoteport.port_state != FC_OBJSTATE_DELETED) { /* means lldd called us twice */ spin_unlock_irqrestore(&rport->lock, flags); nvme_fc_rport_put(rport); return ERR_PTR(-ESTALE); } rport->remoteport.port_role = pinfo->port_role; rport->remoteport.port_id = pinfo->port_id; rport->remoteport.port_state = FC_OBJSTATE_ONLINE; rport->dev_loss_end = 0; /* * kick off a reconnect attempt on all associations to the * remote port. A successful reconnects will resume i/o. */ list_for_each_entry(ctrl, &rport->ctrl_list, ctrl_list) nvme_fc_resume_controller(ctrl); spin_unlock_irqrestore(&rport->lock, flags); return rport; } rport = NULL; out_done: spin_unlock_irqrestore(&nvme_fc_lock, flags); return rport; } static inline void __nvme_fc_set_dev_loss_tmo(struct nvme_fc_rport *rport, struct nvme_fc_port_info *pinfo) { if (pinfo->dev_loss_tmo) rport->remoteport.dev_loss_tmo = pinfo->dev_loss_tmo; else rport->remoteport.dev_loss_tmo = NVME_FC_DEFAULT_DEV_LOSS_TMO; } /** * nvme_fc_register_remoteport - transport entry point called by an * LLDD to register the existence of a NVME * subsystem FC port on its fabric. * @localport: pointer to the (registered) local port that the remote * subsystem port is connected to. * @pinfo: pointer to information about the port to be registered * @portptr: pointer to a remote port pointer. Upon success, the routine * will allocate a nvme_fc_remote_port structure and place its * address in the remote port pointer. Upon failure, remote port * pointer will be set to 0. * * Returns: * a completion status. Must be 0 upon success; a negative errno * (ex: -ENXIO) upon failure. */ int nvme_fc_register_remoteport(struct nvme_fc_local_port *localport, struct nvme_fc_port_info *pinfo, struct nvme_fc_remote_port **portptr) { struct nvme_fc_lport *lport = localport_to_lport(localport); struct nvme_fc_rport *newrec; unsigned long flags; int ret, idx; if (!nvme_fc_lport_get(lport)) { ret = -ESHUTDOWN; goto out_reghost_failed; } /* * look to see if there is already a remoteport that is waiting * for a reconnect (within dev_loss_tmo) with the same WWN's. * If so, transition to it and reconnect. 
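 * ("transition" means nvme_fc_attach_to_suspended_rport() takes a
 * reference on the old rport, restores port_role/port_id, marks it
 * FC_OBJSTATE_ONLINE, clears dev_loss_end, and calls
 * nvme_fc_resume_controller() on each controller still listed on it.)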
*/ newrec = nvme_fc_attach_to_suspended_rport(lport, pinfo); /* found an rport, but something about its state is bad */ if (IS_ERR(newrec)) { ret = PTR_ERR(newrec); goto out_lport_put; /* found existing rport, which was resumed */ } else if (newrec) { nvme_fc_lport_put(lport); __nvme_fc_set_dev_loss_tmo(newrec, pinfo); nvme_fc_signal_discovery_scan(lport, newrec); *portptr = &newrec->remoteport; return 0; } /* nothing found - allocate a new remoteport struct */ newrec = kmalloc((sizeof(*newrec) + lport->ops->remote_priv_sz), GFP_KERNEL); if (!newrec) { ret = -ENOMEM; goto out_lport_put; } idx = ida_alloc(&lport->endp_cnt, GFP_KERNEL); if (idx < 0) { ret = -ENOSPC; goto out_kfree_rport; } INIT_LIST_HEAD(&newrec->endp_list); INIT_LIST_HEAD(&newrec->ctrl_list); INIT_LIST_HEAD(&newrec->ls_req_list); INIT_LIST_HEAD(&newrec->disc_list); kref_init(&newrec->ref); atomic_set(&newrec->act_ctrl_cnt, 0); spin_lock_init(&newrec->lock); newrec->remoteport.localport = &lport->localport; INIT_LIST_HEAD(&newrec->ls_rcv_list); newrec->dev = lport->dev; newrec->lport = lport; if (lport->ops->remote_priv_sz) newrec->remoteport.private = &newrec[1]; else newrec->remoteport.private = NULL; newrec->remoteport.port_role = pinfo->port_role; newrec->remoteport.node_name = pinfo->node_name; newrec->remoteport.port_name = pinfo->port_name; newrec->remoteport.port_id = pinfo->port_id; newrec->remoteport.port_state = FC_OBJSTATE_ONLINE; newrec->remoteport.port_num = idx; __nvme_fc_set_dev_loss_tmo(newrec, pinfo); INIT_WORK(&newrec->lsrcv_work, nvme_fc_handle_ls_rqst_work); spin_lock_irqsave(&nvme_fc_lock, flags); list_add_tail(&newrec->endp_list, &lport->endp_list); spin_unlock_irqrestore(&nvme_fc_lock, flags); nvme_fc_signal_discovery_scan(lport, newrec); *portptr = &newrec->remoteport; return 0; out_kfree_rport: kfree(newrec); out_lport_put: nvme_fc_lport_put(lport); out_reghost_failed: *portptr = NULL; return ret; } EXPORT_SYMBOL_GPL(nvme_fc_register_remoteport); static int nvme_fc_abort_lsops(struct nvme_fc_rport *rport) { struct nvmefc_ls_req_op *lsop; unsigned long flags; restart: spin_lock_irqsave(&rport->lock, flags); list_for_each_entry(lsop, &rport->ls_req_list, lsreq_list) { if (!(lsop->flags & FCOP_FLAGS_TERMIO)) { lsop->flags |= FCOP_FLAGS_TERMIO; spin_unlock_irqrestore(&rport->lock, flags); rport->lport->ops->ls_abort(&rport->lport->localport, &rport->remoteport, &lsop->ls_req); goto restart; } } spin_unlock_irqrestore(&rport->lock, flags); return 0; } static void nvme_fc_ctrl_connectivity_loss(struct nvme_fc_ctrl *ctrl) { dev_info(ctrl->ctrl.device, "NVME-FC{%d}: controller connectivity lost. Awaiting " "Reconnect", ctrl->cnum); switch (ctrl->ctrl.state) { case NVME_CTRL_NEW: case NVME_CTRL_LIVE: /* * Schedule a controller reset. The reset will terminate the * association and schedule the reconnect timer. Reconnects * will be attempted until either the ctlr_loss_tmo * (max_retries * connect_delay) expires or the remoteport's * dev_loss_tmo expires. */ if (nvme_reset_ctrl(&ctrl->ctrl)) { dev_warn(ctrl->ctrl.device, "NVME-FC{%d}: Couldn't schedule reset.\n", ctrl->cnum); nvme_delete_ctrl(&ctrl->ctrl); } break; case NVME_CTRL_CONNECTING: /* * The association has already been terminated and the * controller is attempting reconnects. No need to do anything * futher. Reconnects will be attempted until either the * ctlr_loss_tmo (max_retries * connect_delay) expires or the * remoteport's dev_loss_tmo expires. 
*/ break; case NVME_CTRL_RESETTING: /* * Controller is already in the process of terminating the * association. No need to do anything further. The reconnect * step will kick in naturally after the association is * terminated. */ break; case NVME_CTRL_DELETING: case NVME_CTRL_DELETING_NOIO: default: /* no action to take - let it delete */ break; } } /** * nvme_fc_unregister_remoteport - transport entry point called by an * LLDD to deregister/remove a previously * registered a NVME subsystem FC port. * @portptr: pointer to the (registered) remote port that is to be * deregistered. * * Returns: * a completion status. Must be 0 upon success; a negative errno * (ex: -ENXIO) upon failure. */ int nvme_fc_unregister_remoteport(struct nvme_fc_remote_port *portptr) { struct nvme_fc_rport *rport = remoteport_to_rport(portptr); struct nvme_fc_ctrl *ctrl; unsigned long flags; if (!portptr) return -EINVAL; spin_lock_irqsave(&rport->lock, flags); if (portptr->port_state != FC_OBJSTATE_ONLINE) { spin_unlock_irqrestore(&rport->lock, flags); return -EINVAL; } portptr->port_state = FC_OBJSTATE_DELETED; rport->dev_loss_end = jiffies + (portptr->dev_loss_tmo * HZ); list_for_each_entry(ctrl, &rport->ctrl_list, ctrl_list) { /* if dev_loss_tmo==0, dev loss is immediate */ if (!portptr->dev_loss_tmo) { dev_warn(ctrl->ctrl.device, "NVME-FC{%d}: controller connectivity lost.\n", ctrl->cnum); nvme_delete_ctrl(&ctrl->ctrl); } else nvme_fc_ctrl_connectivity_loss(ctrl); } spin_unlock_irqrestore(&rport->lock, flags); nvme_fc_abort_lsops(rport); if (atomic_read(&rport->act_ctrl_cnt) == 0) rport->lport->ops->remoteport_delete(portptr); /* * release the reference, which will allow, if all controllers * go away, which should only occur after dev_loss_tmo occurs, * for the rport to be torn down. */ nvme_fc_rport_put(rport); return 0; } EXPORT_SYMBOL_GPL(nvme_fc_unregister_remoteport); /** * nvme_fc_rescan_remoteport - transport entry point called by an * LLDD to request a nvme device rescan. * @remoteport: pointer to the (registered) remote port that is to be * rescanned. * * Returns: N/A */ void nvme_fc_rescan_remoteport(struct nvme_fc_remote_port *remoteport) { struct nvme_fc_rport *rport = remoteport_to_rport(remoteport); nvme_fc_signal_discovery_scan(rport->lport, rport); } EXPORT_SYMBOL_GPL(nvme_fc_rescan_remoteport); int nvme_fc_set_remoteport_devloss(struct nvme_fc_remote_port *portptr, u32 dev_loss_tmo) { struct nvme_fc_rport *rport = remoteport_to_rport(portptr); unsigned long flags; spin_lock_irqsave(&rport->lock, flags); if (portptr->port_state != FC_OBJSTATE_ONLINE) { spin_unlock_irqrestore(&rport->lock, flags); return -EINVAL; } /* a dev_loss_tmo of 0 (immediate) is allowed to be set */ rport->remoteport.dev_loss_tmo = dev_loss_tmo; spin_unlock_irqrestore(&rport->lock, flags); return 0; } EXPORT_SYMBOL_GPL(nvme_fc_set_remoteport_devloss); /* *********************** FC-NVME DMA Handling **************************** */ /* * The fcloop device passes in a NULL device pointer. Real LLD's will * pass in a valid device pointer. If NULL is passed to the dma mapping * routines, depending on the platform, it may or may not succeed, and * may crash. * * As such: * Wrapper all the dma routines and check the dev pointer. * * If simple mappings (return just a dma address, we'll noop them, * returning a dma address of 0. * * On more complex mappings (dma_map_sg), a pseudo routine fills * in the scatter list, setting all dma addresses to 0. 
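 *
 * Illustration of the resulting no-op behavior when a NULL dev is passed
 * in (buf and len are placeholders):
 *
 *	dma_addr_t a = fc_dma_map_single(NULL, buf, len, DMA_TO_DEVICE);
 *	// a == 0 and fc_dma_mapping_error(NULL, a) == 0, so the caller
 *	// proceeds exactly as it would with a real mapping.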
*/ static inline dma_addr_t fc_dma_map_single(struct device *dev, void *ptr, size_t size, enum dma_data_direction dir) { return dev ? dma_map_single(dev, ptr, size, dir) : (dma_addr_t)0L; } static inline int fc_dma_mapping_error(struct device *dev, dma_addr_t dma_addr) { return dev ? dma_mapping_error(dev, dma_addr) : 0; } static inline void fc_dma_unmap_single(struct device *dev, dma_addr_t addr, size_t size, enum dma_data_direction dir) { if (dev) dma_unmap_single(dev, addr, size, dir); } static inline void fc_dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr, size_t size, enum dma_data_direction dir) { if (dev) dma_sync_single_for_cpu(dev, addr, size, dir); } static inline void fc_dma_sync_single_for_device(struct device *dev, dma_addr_t addr, size_t size, enum dma_data_direction dir) { if (dev) dma_sync_single_for_device(dev, addr, size, dir); } /* pseudo dma_map_sg call */ static int fc_map_sg(struct scatterlist *sg, int nents) { struct scatterlist *s; int i; WARN_ON(nents == 0 || sg[0].length == 0); for_each_sg(sg, s, nents, i) { s->dma_address = 0L; #ifdef CONFIG_NEED_SG_DMA_LENGTH s->dma_length = s->length; #endif } return nents; } static inline int fc_dma_map_sg(struct device *dev, struct scatterlist *sg, int nents, enum dma_data_direction dir) { return dev ? dma_map_sg(dev, sg, nents, dir) : fc_map_sg(sg, nents); } static inline void fc_dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents, enum dma_data_direction dir) { if (dev) dma_unmap_sg(dev, sg, nents, dir); } /* *********************** FC-NVME LS Handling **************************** */ static void nvme_fc_ctrl_put(struct nvme_fc_ctrl *); static int nvme_fc_ctrl_get(struct nvme_fc_ctrl *); static void nvme_fc_error_recovery(struct nvme_fc_ctrl *ctrl, char *errmsg); static void __nvme_fc_finish_ls_req(struct nvmefc_ls_req_op *lsop) { struct nvme_fc_rport *rport = lsop->rport; struct nvmefc_ls_req *lsreq = &lsop->ls_req; unsigned long flags; spin_lock_irqsave(&rport->lock, flags); if (!lsop->req_queued) { spin_unlock_irqrestore(&rport->lock, flags); return; } list_del(&lsop->lsreq_list); lsop->req_queued = false; spin_unlock_irqrestore(&rport->lock, flags); fc_dma_unmap_single(rport->dev, lsreq->rqstdma, (lsreq->rqstlen + lsreq->rsplen), DMA_BIDIRECTIONAL); nvme_fc_rport_put(rport); } static int __nvme_fc_send_ls_req(struct nvme_fc_rport *rport, struct nvmefc_ls_req_op *lsop, void (*done)(struct nvmefc_ls_req *req, int status)) { struct nvmefc_ls_req *lsreq = &lsop->ls_req; unsigned long flags; int ret = 0; if (rport->remoteport.port_state != FC_OBJSTATE_ONLINE) return -ECONNREFUSED; if (!nvme_fc_rport_get(rport)) return -ESHUTDOWN; lsreq->done = done; lsop->rport = rport; lsop->req_queued = false; INIT_LIST_HEAD(&lsop->lsreq_list); init_completion(&lsop->ls_done); lsreq->rqstdma = fc_dma_map_single(rport->dev, lsreq->rqstaddr, lsreq->rqstlen + lsreq->rsplen, DMA_BIDIRECTIONAL); if (fc_dma_mapping_error(rport->dev, lsreq->rqstdma)) { ret = -EFAULT; goto out_putrport; } lsreq->rspdma = lsreq->rqstdma + lsreq->rqstlen; spin_lock_irqsave(&rport->lock, flags); list_add_tail(&lsop->lsreq_list, &rport->ls_req_list); lsop->req_queued = true; spin_unlock_irqrestore(&rport->lock, flags); ret = rport->lport->ops->ls_req(&rport->lport->localport, &rport->remoteport, lsreq); if (ret) goto out_unlink; return 0; out_unlink: lsop->ls_error = ret; spin_lock_irqsave(&rport->lock, flags); lsop->req_queued = false; list_del(&lsop->lsreq_list); spin_unlock_irqrestore(&rport->lock, flags); fc_dma_unmap_single(rport->dev, 
lsreq->rqstdma, (lsreq->rqstlen + lsreq->rsplen), DMA_BIDIRECTIONAL); out_putrport: nvme_fc_rport_put(rport); return ret; } static void nvme_fc_send_ls_req_done(struct nvmefc_ls_req *lsreq, int status) { struct nvmefc_ls_req_op *lsop = ls_req_to_lsop(lsreq); lsop->ls_error = status; complete(&lsop->ls_done); } static int nvme_fc_send_ls_req(struct nvme_fc_rport *rport, struct nvmefc_ls_req_op *lsop) { struct nvmefc_ls_req *lsreq = &lsop->ls_req; struct fcnvme_ls_rjt *rjt = lsreq->rspaddr; int ret; ret = __nvme_fc_send_ls_req(rport, lsop, nvme_fc_send_ls_req_done); if (!ret) { /* * No timeout/not interruptible as we need the struct * to exist until the lldd calls us back. Thus mandate * wait until driver calls back. lldd responsible for * the timeout action */ wait_for_completion(&lsop->ls_done); __nvme_fc_finish_ls_req(lsop); ret = lsop->ls_error; } if (ret) return ret; /* ACC or RJT payload ? */ if (rjt->w0.ls_cmd == FCNVME_LS_RJT) return -ENXIO; return 0; } static int nvme_fc_send_ls_req_async(struct nvme_fc_rport *rport, struct nvmefc_ls_req_op *lsop, void (*done)(struct nvmefc_ls_req *req, int status)) { /* don't wait for completion */ return __nvme_fc_send_ls_req(rport, lsop, done); } static int nvme_fc_connect_admin_queue(struct nvme_fc_ctrl *ctrl, struct nvme_fc_queue *queue, u16 qsize, u16 ersp_ratio) { struct nvmefc_ls_req_op *lsop; struct nvmefc_ls_req *lsreq; struct fcnvme_ls_cr_assoc_rqst *assoc_rqst; struct fcnvme_ls_cr_assoc_acc *assoc_acc; unsigned long flags; int ret, fcret = 0; lsop = kzalloc((sizeof(*lsop) + sizeof(*assoc_rqst) + sizeof(*assoc_acc) + ctrl->lport->ops->lsrqst_priv_sz), GFP_KERNEL); if (!lsop) { dev_info(ctrl->ctrl.device, "NVME-FC{%d}: send Create Association failed: ENOMEM\n", ctrl->cnum); ret = -ENOMEM; goto out_no_memory; } assoc_rqst = (struct fcnvme_ls_cr_assoc_rqst *)&lsop[1]; assoc_acc = (struct fcnvme_ls_cr_assoc_acc *)&assoc_rqst[1]; lsreq = &lsop->ls_req; if (ctrl->lport->ops->lsrqst_priv_sz) lsreq->private = &assoc_acc[1]; else lsreq->private = NULL; assoc_rqst->w0.ls_cmd = FCNVME_LS_CREATE_ASSOCIATION; assoc_rqst->desc_list_len = cpu_to_be32(sizeof(struct fcnvme_lsdesc_cr_assoc_cmd)); assoc_rqst->assoc_cmd.desc_tag = cpu_to_be32(FCNVME_LSDESC_CREATE_ASSOC_CMD); assoc_rqst->assoc_cmd.desc_len = fcnvme_lsdesc_len( sizeof(struct fcnvme_lsdesc_cr_assoc_cmd)); assoc_rqst->assoc_cmd.ersp_ratio = cpu_to_be16(ersp_ratio); assoc_rqst->assoc_cmd.sqsize = cpu_to_be16(qsize - 1); /* Linux supports only Dynamic controllers */ assoc_rqst->assoc_cmd.cntlid = cpu_to_be16(0xffff); uuid_copy(&assoc_rqst->assoc_cmd.hostid, &ctrl->ctrl.opts->host->id); strncpy(assoc_rqst->assoc_cmd.hostnqn, ctrl->ctrl.opts->host->nqn, min(FCNVME_ASSOC_HOSTNQN_LEN, NVMF_NQN_SIZE)); strncpy(assoc_rqst->assoc_cmd.subnqn, ctrl->ctrl.opts->subsysnqn, min(FCNVME_ASSOC_SUBNQN_LEN, NVMF_NQN_SIZE)); lsop->queue = queue; lsreq->rqstaddr = assoc_rqst; lsreq->rqstlen = sizeof(*assoc_rqst); lsreq->rspaddr = assoc_acc; lsreq->rsplen = sizeof(*assoc_acc); lsreq->timeout = NVME_FC_LS_TIMEOUT_SEC; ret = nvme_fc_send_ls_req(ctrl->rport, lsop); if (ret) goto out_free_buffer; /* process connect LS completion */ /* validate the ACC response */ if (assoc_acc->hdr.w0.ls_cmd != FCNVME_LS_ACC) fcret = VERR_LSACC; else if (assoc_acc->hdr.desc_list_len != fcnvme_lsdesc_len( sizeof(struct fcnvme_ls_cr_assoc_acc))) fcret = VERR_CR_ASSOC_ACC_LEN; else if (assoc_acc->hdr.rqst.desc_tag != cpu_to_be32(FCNVME_LSDESC_RQST)) fcret = VERR_LSDESC_RQST; else if (assoc_acc->hdr.rqst.desc_len != 
fcnvme_lsdesc_len(sizeof(struct fcnvme_lsdesc_rqst))) fcret = VERR_LSDESC_RQST_LEN; else if (assoc_acc->hdr.rqst.w0.ls_cmd != FCNVME_LS_CREATE_ASSOCIATION) fcret = VERR_CR_ASSOC; else if (assoc_acc->associd.desc_tag != cpu_to_be32(FCNVME_LSDESC_ASSOC_ID)) fcret = VERR_ASSOC_ID; else if (assoc_acc->associd.desc_len != fcnvme_lsdesc_len( sizeof(struct fcnvme_lsdesc_assoc_id))) fcret = VERR_ASSOC_ID_LEN; else if (assoc_acc->connectid.desc_tag != cpu_to_be32(FCNVME_LSDESC_CONN_ID)) fcret = VERR_CONN_ID; else if (assoc_acc->connectid.desc_len != fcnvme_lsdesc_len(sizeof(struct fcnvme_lsdesc_conn_id))) fcret = VERR_CONN_ID_LEN; if (fcret) { ret = -EBADF; dev_err(ctrl->dev, "q %d Create Association LS failed: %s\n", queue->qnum, validation_errors[fcret]); } else { spin_lock_irqsave(&ctrl->lock, flags); ctrl->association_id = be64_to_cpu(assoc_acc->associd.association_id); queue->connection_id = be64_to_cpu(assoc_acc->connectid.connection_id); set_bit(NVME_FC_Q_CONNECTED, &queue->flags); spin_unlock_irqrestore(&ctrl->lock, flags); } out_free_buffer: kfree(lsop); out_no_memory: if (ret) dev_err(ctrl->dev, "queue %d connect admin queue failed (%d).\n", queue->qnum, ret); return ret; } static int nvme_fc_connect_queue(struct nvme_fc_ctrl *ctrl, struct nvme_fc_queue *queue, u16 qsize, u16 ersp_ratio) { struct nvmefc_ls_req_op *lsop; struct nvmefc_ls_req *lsreq; struct fcnvme_ls_cr_conn_rqst *conn_rqst; struct fcnvme_ls_cr_conn_acc *conn_acc; int ret, fcret = 0; lsop = kzalloc((sizeof(*lsop) + sizeof(*conn_rqst) + sizeof(*conn_acc) + ctrl->lport->ops->lsrqst_priv_sz), GFP_KERNEL); if (!lsop) { dev_info(ctrl->ctrl.device, "NVME-FC{%d}: send Create Connection failed: ENOMEM\n", ctrl->cnum); ret = -ENOMEM; goto out_no_memory; } conn_rqst = (struct fcnvme_ls_cr_conn_rqst *)&lsop[1]; conn_acc = (struct fcnvme_ls_cr_conn_acc *)&conn_rqst[1]; lsreq = &lsop->ls_req; if (ctrl->lport->ops->lsrqst_priv_sz) lsreq->private = (void *)&conn_acc[1]; else lsreq->private = NULL; conn_rqst->w0.ls_cmd = FCNVME_LS_CREATE_CONNECTION; conn_rqst->desc_list_len = cpu_to_be32( sizeof(struct fcnvme_lsdesc_assoc_id) + sizeof(struct fcnvme_lsdesc_cr_conn_cmd)); conn_rqst->associd.desc_tag = cpu_to_be32(FCNVME_LSDESC_ASSOC_ID); conn_rqst->associd.desc_len = fcnvme_lsdesc_len( sizeof(struct fcnvme_lsdesc_assoc_id)); conn_rqst->associd.association_id = cpu_to_be64(ctrl->association_id); conn_rqst->connect_cmd.desc_tag = cpu_to_be32(FCNVME_LSDESC_CREATE_CONN_CMD); conn_rqst->connect_cmd.desc_len = fcnvme_lsdesc_len( sizeof(struct fcnvme_lsdesc_cr_conn_cmd)); conn_rqst->connect_cmd.ersp_ratio = cpu_to_be16(ersp_ratio); conn_rqst->connect_cmd.qid = cpu_to_be16(queue->qnum); conn_rqst->connect_cmd.sqsize = cpu_to_be16(qsize - 1); lsop->queue = queue; lsreq->rqstaddr = conn_rqst; lsreq->rqstlen = sizeof(*conn_rqst); lsreq->rspaddr = conn_acc; lsreq->rsplen = sizeof(*conn_acc); lsreq->timeout = NVME_FC_LS_TIMEOUT_SEC; ret = nvme_fc_send_ls_req(ctrl->rport, lsop); if (ret) goto out_free_buffer; /* process connect LS completion */ /* validate the ACC response */ if (conn_acc->hdr.w0.ls_cmd != FCNVME_LS_ACC) fcret = VERR_LSACC; else if (conn_acc->hdr.desc_list_len != fcnvme_lsdesc_len(sizeof(struct fcnvme_ls_cr_conn_acc))) fcret = VERR_CR_CONN_ACC_LEN; else if (conn_acc->hdr.rqst.desc_tag != cpu_to_be32(FCNVME_LSDESC_RQST)) fcret = VERR_LSDESC_RQST; else if (conn_acc->hdr.rqst.desc_len != fcnvme_lsdesc_len(sizeof(struct fcnvme_lsdesc_rqst))) fcret = VERR_LSDESC_RQST_LEN; else if (conn_acc->hdr.rqst.w0.ls_cmd != FCNVME_LS_CREATE_CONNECTION) 
fcret = VERR_CR_CONN; else if (conn_acc->connectid.desc_tag != cpu_to_be32(FCNVME_LSDESC_CONN_ID)) fcret = VERR_CONN_ID; else if (conn_acc->connectid.desc_len != fcnvme_lsdesc_len(sizeof(struct fcnvme_lsdesc_conn_id))) fcret = VERR_CONN_ID_LEN; if (fcret) { ret = -EBADF; dev_err(ctrl->dev, "q %d Create I/O Connection LS failed: %s\n", queue->qnum, validation_errors[fcret]); } else { queue->connection_id = be64_to_cpu(conn_acc->connectid.connection_id); set_bit(NVME_FC_Q_CONNECTED, &queue->flags); } out_free_buffer: kfree(lsop); out_no_memory: if (ret) dev_err(ctrl->dev, "queue %d connect I/O queue failed (%d).\n", queue->qnum, ret); return ret; } static void nvme_fc_disconnect_assoc_done(struct nvmefc_ls_req *lsreq, int status) { struct nvmefc_ls_req_op *lsop = ls_req_to_lsop(lsreq); __nvme_fc_finish_ls_req(lsop); /* fc-nvme initiator doesn't care about success or failure of cmd */ kfree(lsop); } /* * This routine sends a FC-NVME LS to disconnect (aka terminate) * the FC-NVME Association. Terminating the association also * terminates the FC-NVME connections (per queue, both admin and io * queues) that are part of the association. E.g. things are torn * down, and the related FC-NVME Association ID and Connection IDs * become invalid. * * The behavior of the fc-nvme initiator is such that it's * understanding of the association and connections will implicitly * be torn down. The action is implicit as it may be due to a loss of * connectivity with the fc-nvme target, so you may never get a * response even if you tried. As such, the action of this routine * is to asynchronously send the LS, ignore any results of the LS, and * continue on with terminating the association. If the fc-nvme target * is present and receives the LS, it too can tear down. */ static void nvme_fc_xmt_disconnect_assoc(struct nvme_fc_ctrl *ctrl) { struct fcnvme_ls_disconnect_assoc_rqst *discon_rqst; struct fcnvme_ls_disconnect_assoc_acc *discon_acc; struct nvmefc_ls_req_op *lsop; struct nvmefc_ls_req *lsreq; int ret; lsop = kzalloc((sizeof(*lsop) + sizeof(*discon_rqst) + sizeof(*discon_acc) + ctrl->lport->ops->lsrqst_priv_sz), GFP_KERNEL); if (!lsop) { dev_info(ctrl->ctrl.device, "NVME-FC{%d}: send Disconnect Association " "failed: ENOMEM\n", ctrl->cnum); return; } discon_rqst = (struct fcnvme_ls_disconnect_assoc_rqst *)&lsop[1]; discon_acc = (struct fcnvme_ls_disconnect_assoc_acc *)&discon_rqst[1]; lsreq = &lsop->ls_req; if (ctrl->lport->ops->lsrqst_priv_sz) lsreq->private = (void *)&discon_acc[1]; else lsreq->private = NULL; nvmefc_fmt_lsreq_discon_assoc(lsreq, discon_rqst, discon_acc, ctrl->association_id); ret = nvme_fc_send_ls_req_async(ctrl->rport, lsop, nvme_fc_disconnect_assoc_done); if (ret) kfree(lsop); } static void nvme_fc_xmt_ls_rsp_done(struct nvmefc_ls_rsp *lsrsp) { struct nvmefc_ls_rcv_op *lsop = lsrsp->nvme_fc_private; struct nvme_fc_rport *rport = lsop->rport; struct nvme_fc_lport *lport = rport->lport; unsigned long flags; spin_lock_irqsave(&rport->lock, flags); list_del(&lsop->lsrcv_list); spin_unlock_irqrestore(&rport->lock, flags); fc_dma_sync_single_for_cpu(lport->dev, lsop->rspdma, sizeof(*lsop->rspbuf), DMA_TO_DEVICE); fc_dma_unmap_single(lport->dev, lsop->rspdma, sizeof(*lsop->rspbuf), DMA_TO_DEVICE); kfree(lsop->rspbuf); kfree(lsop->rqstbuf); kfree(lsop); nvme_fc_rport_put(rport); } static void nvme_fc_xmt_ls_rsp(struct nvmefc_ls_rcv_op *lsop) { struct nvme_fc_rport *rport = lsop->rport; struct nvme_fc_lport *lport = rport->lport; struct fcnvme_ls_rqst_w0 *w0 = &lsop->rqstbuf->w0; int ret; 
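	/*
	 * Flush the formatted response to the device, then hand the LS
	 * response to the LLDD for transmit. If the LLDD rejects the
	 * transmit, complete the exchange locally via
	 * nvme_fc_xmt_ls_rsp_done() so the op is still unlinked and freed.
	 */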
fc_dma_sync_single_for_device(lport->dev, lsop->rspdma, sizeof(*lsop->rspbuf), DMA_TO_DEVICE); ret = lport->ops->xmt_ls_rsp(&lport->localport, &rport->remoteport, lsop->lsrsp); if (ret) { dev_warn(lport->dev, "LLDD rejected LS RSP xmt: LS %d status %d\n", w0->ls_cmd, ret); nvme_fc_xmt_ls_rsp_done(lsop->lsrsp); return; } } static struct nvme_fc_ctrl * nvme_fc_match_disconn_ls(struct nvme_fc_rport *rport, struct nvmefc_ls_rcv_op *lsop) { struct fcnvme_ls_disconnect_assoc_rqst *rqst = &lsop->rqstbuf->rq_dis_assoc; struct nvme_fc_ctrl *ctrl, *ret = NULL; struct nvmefc_ls_rcv_op *oldls = NULL; u64 association_id = be64_to_cpu(rqst->associd.association_id); unsigned long flags; spin_lock_irqsave(&rport->lock, flags); list_for_each_entry(ctrl, &rport->ctrl_list, ctrl_list) { if (!nvme_fc_ctrl_get(ctrl)) continue; spin_lock(&ctrl->lock); if (association_id == ctrl->association_id) { oldls = ctrl->rcv_disconn; ctrl->rcv_disconn = lsop; ret = ctrl; } spin_unlock(&ctrl->lock); if (ret) /* leave the ctrl get reference */ break; nvme_fc_ctrl_put(ctrl); } spin_unlock_irqrestore(&rport->lock, flags); /* transmit a response for anything that was pending */ if (oldls) { dev_info(rport->lport->dev, "NVME-FC{%d}: Multiple Disconnect Association " "LS's received\n", ctrl->cnum); /* overwrite good response with bogus failure */ oldls->lsrsp->rsplen = nvme_fc_format_rjt(oldls->rspbuf, sizeof(*oldls->rspbuf), rqst->w0.ls_cmd, FCNVME_RJT_RC_UNAB, FCNVME_RJT_EXP_NONE, 0); nvme_fc_xmt_ls_rsp(oldls); } return ret; } /* * returns true to mean LS handled and ls_rsp can be sent * returns false to defer ls_rsp xmt (will be done as part of * association termination) */ static bool nvme_fc_ls_disconnect_assoc(struct nvmefc_ls_rcv_op *lsop) { struct nvme_fc_rport *rport = lsop->rport; struct fcnvme_ls_disconnect_assoc_rqst *rqst = &lsop->rqstbuf->rq_dis_assoc; struct fcnvme_ls_disconnect_assoc_acc *acc = &lsop->rspbuf->rsp_dis_assoc; struct nvme_fc_ctrl *ctrl = NULL; int ret = 0; memset(acc, 0, sizeof(*acc)); ret = nvmefc_vldt_lsreq_discon_assoc(lsop->rqstdatalen, rqst); if (!ret) { /* match an active association */ ctrl = nvme_fc_match_disconn_ls(rport, lsop); if (!ctrl) ret = VERR_NO_ASSOC; } if (ret) { dev_info(rport->lport->dev, "Disconnect LS failed: %s\n", validation_errors[ret]); lsop->lsrsp->rsplen = nvme_fc_format_rjt(acc, sizeof(*acc), rqst->w0.ls_cmd, (ret == VERR_NO_ASSOC) ? FCNVME_RJT_RC_INV_ASSOC : FCNVME_RJT_RC_LOGIC, FCNVME_RJT_EXP_NONE, 0); return true; } /* format an ACCept response */ lsop->lsrsp->rsplen = sizeof(*acc); nvme_fc_format_rsp_hdr(acc, FCNVME_LS_ACC, fcnvme_lsdesc_len( sizeof(struct fcnvme_ls_disconnect_assoc_acc)), FCNVME_LS_DISCONNECT_ASSOC); /* * the transmit of the response will occur after the exchanges * for the association have been ABTS'd by * nvme_fc_delete_association(). */ /* fail the association */ nvme_fc_error_recovery(ctrl, "Disconnect Association LS received"); /* release the reference taken by nvme_fc_match_disconn_ls() */ nvme_fc_ctrl_put(ctrl); return false; } /* * Actual Processing routine for received FC-NVME LS Requests from the LLD * returns true if a response should be sent afterward, false if rsp will * be sent asynchronously. */ static bool nvme_fc_handle_ls_rqst(struct nvmefc_ls_rcv_op *lsop) { struct fcnvme_ls_rqst_w0 *w0 = &lsop->rqstbuf->w0; bool ret = true; lsop->lsrsp->nvme_fc_private = lsop; lsop->lsrsp->rspbuf = lsop->rspbuf; lsop->lsrsp->rspdma = lsop->rspdma; lsop->lsrsp->done = nvme_fc_xmt_ls_rsp_done; /* Be preventative. 
handlers will later set to valid length */ lsop->lsrsp->rsplen = 0; /* * handlers: * parse request input, execute the request, and format the * LS response */ switch (w0->ls_cmd) { case FCNVME_LS_DISCONNECT_ASSOC: ret = nvme_fc_ls_disconnect_assoc(lsop); break; case FCNVME_LS_DISCONNECT_CONN: lsop->lsrsp->rsplen = nvme_fc_format_rjt(lsop->rspbuf, sizeof(*lsop->rspbuf), w0->ls_cmd, FCNVME_RJT_RC_UNSUP, FCNVME_RJT_EXP_NONE, 0); break; case FCNVME_LS_CREATE_ASSOCIATION: case FCNVME_LS_CREATE_CONNECTION: lsop->lsrsp->rsplen = nvme_fc_format_rjt(lsop->rspbuf, sizeof(*lsop->rspbuf), w0->ls_cmd, FCNVME_RJT_RC_LOGIC, FCNVME_RJT_EXP_NONE, 0); break; default: lsop->lsrsp->rsplen = nvme_fc_format_rjt(lsop->rspbuf, sizeof(*lsop->rspbuf), w0->ls_cmd, FCNVME_RJT_RC_INVAL, FCNVME_RJT_EXP_NONE, 0); break; } return(ret); } static void nvme_fc_handle_ls_rqst_work(struct work_struct *work) { struct nvme_fc_rport *rport = container_of(work, struct nvme_fc_rport, lsrcv_work); struct fcnvme_ls_rqst_w0 *w0; struct nvmefc_ls_rcv_op *lsop; unsigned long flags; bool sendrsp; restart: sendrsp = true; spin_lock_irqsave(&rport->lock, flags); list_for_each_entry(lsop, &rport->ls_rcv_list, lsrcv_list) { if (lsop->handled) continue; lsop->handled = true; if (rport->remoteport.port_state == FC_OBJSTATE_ONLINE) { spin_unlock_irqrestore(&rport->lock, flags); sendrsp = nvme_fc_handle_ls_rqst(lsop); } else { spin_unlock_irqrestore(&rport->lock, flags); w0 = &lsop->rqstbuf->w0; lsop->lsrsp->rsplen = nvme_fc_format_rjt( lsop->rspbuf, sizeof(*lsop->rspbuf), w0->ls_cmd, FCNVME_RJT_RC_UNAB, FCNVME_RJT_EXP_NONE, 0); } if (sendrsp) nvme_fc_xmt_ls_rsp(lsop); goto restart; } spin_unlock_irqrestore(&rport->lock, flags); } static void nvme_fc_rcv_ls_req_err_msg(struct nvme_fc_lport *lport, struct fcnvme_ls_rqst_w0 *w0) { dev_info(lport->dev, "RCV %s LS failed: No memory\n", (w0->ls_cmd <= NVME_FC_LAST_LS_CMD_VALUE) ? nvmefc_ls_names[w0->ls_cmd] : ""); } /** * nvme_fc_rcv_ls_req - transport entry point called by an LLDD * upon the reception of a NVME LS request. * * The nvme-fc layer will copy payload to an internal structure for * processing. As such, upon completion of the routine, the LLDD may * immediately free/reuse the LS request buffer passed in the call. * * If this routine returns error, the LLDD should abort the exchange. * * @portptr: pointer to the (registered) remote port that the LS * was received from. The remoteport is associated with * a specific localport. * @lsrsp: pointer to a nvmefc_ls_rsp response structure to be * used to reference the exchange corresponding to the LS * when issuing an ls response. * @lsreqbuf: pointer to the buffer containing the LS Request * @lsreqbuf_len: length, in bytes, of the received LS request */ int nvme_fc_rcv_ls_req(struct nvme_fc_remote_port *portptr, struct nvmefc_ls_rsp *lsrsp, void *lsreqbuf, u32 lsreqbuf_len) { struct nvme_fc_rport *rport = remoteport_to_rport(portptr); struct nvme_fc_lport *lport = rport->lport; struct fcnvme_ls_rqst_w0 *w0 = (struct fcnvme_ls_rqst_w0 *)lsreqbuf; struct nvmefc_ls_rcv_op *lsop; unsigned long flags; int ret; nvme_fc_rport_get(rport); /* validate there's a routine to transmit a response */ if (!lport->ops->xmt_ls_rsp) { dev_info(lport->dev, "RCV %s LS failed: no LLDD xmt_ls_rsp\n", (w0->ls_cmd <= NVME_FC_LAST_LS_CMD_VALUE) ? nvmefc_ls_names[w0->ls_cmd] : ""); ret = -EINVAL; goto out_put; } if (lsreqbuf_len > sizeof(union nvmefc_ls_requests)) { dev_info(lport->dev, "RCV %s LS failed: payload too large\n", (w0->ls_cmd <= NVME_FC_LAST_LS_CMD_VALUE) ? 
nvmefc_ls_names[w0->ls_cmd] : ""); ret = -E2BIG; goto out_put; } lsop = kzalloc(sizeof(*lsop), GFP_KERNEL); if (!lsop) { nvme_fc_rcv_ls_req_err_msg(lport, w0); ret = -ENOMEM; goto out_put; } lsop->rqstbuf = kzalloc(sizeof(*lsop->rqstbuf), GFP_KERNEL); lsop->rspbuf = kzalloc(sizeof(*lsop->rspbuf), GFP_KERNEL); if (!lsop->rqstbuf || !lsop->rspbuf) { nvme_fc_rcv_ls_req_err_msg(lport, w0); ret = -ENOMEM; goto out_free; } lsop->rspdma = fc_dma_map_single(lport->dev, lsop->rspbuf, sizeof(*lsop->rspbuf), DMA_TO_DEVICE); if (fc_dma_mapping_error(lport->dev, lsop->rspdma)) { dev_info(lport->dev, "RCV %s LS failed: DMA mapping failure\n", (w0->ls_cmd <= NVME_FC_LAST_LS_CMD_VALUE) ? nvmefc_ls_names[w0->ls_cmd] : ""); ret = -EFAULT; goto out_free; } lsop->rport = rport; lsop->lsrsp = lsrsp; memcpy(lsop->rqstbuf, lsreqbuf, lsreqbuf_len); lsop->rqstdatalen = lsreqbuf_len; spin_lock_irqsave(&rport->lock, flags); if (rport->remoteport.port_state != FC_OBJSTATE_ONLINE) { spin_unlock_irqrestore(&rport->lock, flags); ret = -ENOTCONN; goto out_unmap; } list_add_tail(&lsop->lsrcv_list, &rport->ls_rcv_list); spin_unlock_irqrestore(&rport->lock, flags); schedule_work(&rport->lsrcv_work); return 0; out_unmap: fc_dma_unmap_single(lport->dev, lsop->rspdma, sizeof(*lsop->rspbuf), DMA_TO_DEVICE); out_free: kfree(lsop->rspbuf); kfree(lsop->rqstbuf); kfree(lsop); out_put: nvme_fc_rport_put(rport); return ret; } EXPORT_SYMBOL_GPL(nvme_fc_rcv_ls_req); /* *********************** NVME Ctrl Routines **************************** */ static void __nvme_fc_exit_request(struct nvme_fc_ctrl *ctrl, struct nvme_fc_fcp_op *op) { fc_dma_unmap_single(ctrl->lport->dev, op->fcp_req.rspdma, sizeof(op->rsp_iu), DMA_FROM_DEVICE); fc_dma_unmap_single(ctrl->lport->dev, op->fcp_req.cmddma, sizeof(op->cmd_iu), DMA_TO_DEVICE); atomic_set(&op->state, FCPOP_STATE_UNINIT); } static void nvme_fc_exit_request(struct blk_mq_tag_set *set, struct request *rq, unsigned int hctx_idx) { struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(rq); return __nvme_fc_exit_request(to_fc_ctrl(set->driver_data), op); } static int __nvme_fc_abort_op(struct nvme_fc_ctrl *ctrl, struct nvme_fc_fcp_op *op) { unsigned long flags; int opstate; spin_lock_irqsave(&ctrl->lock, flags); opstate = atomic_xchg(&op->state, FCPOP_STATE_ABORTED); if (opstate != FCPOP_STATE_ACTIVE) atomic_set(&op->state, opstate); else if (test_bit(FCCTRL_TERMIO, &ctrl->flags)) { op->flags |= FCOP_FLAGS_TERMIO; ctrl->iocnt++; } spin_unlock_irqrestore(&ctrl->lock, flags); if (opstate != FCPOP_STATE_ACTIVE) return -ECANCELED; ctrl->lport->ops->fcp_abort(&ctrl->lport->localport, &ctrl->rport->remoteport, op->queue->lldd_handle, &op->fcp_req); return 0; } static void nvme_fc_abort_aen_ops(struct nvme_fc_ctrl *ctrl) { struct nvme_fc_fcp_op *aen_op = ctrl->aen_ops; int i; /* ensure we've initialized the ops once */ if (!(aen_op->flags & FCOP_FLAGS_AEN)) return; for (i = 0; i < NVME_NR_AEN_COMMANDS; i++, aen_op++) __nvme_fc_abort_op(ctrl, aen_op); } static inline void __nvme_fc_fcpop_chk_teardowns(struct nvme_fc_ctrl *ctrl, struct nvme_fc_fcp_op *op, int opstate) { unsigned long flags; if (opstate == FCPOP_STATE_ABORTED) { spin_lock_irqsave(&ctrl->lock, flags); if (test_bit(FCCTRL_TERMIO, &ctrl->flags) && op->flags & FCOP_FLAGS_TERMIO) { if (!--ctrl->iocnt) wake_up(&ctrl->ioabort_wait); } spin_unlock_irqrestore(&ctrl->lock, flags); } } static void nvme_fc_ctrl_ioerr_work(struct work_struct *work) { struct nvme_fc_ctrl *ctrl = container_of(work, struct nvme_fc_ctrl, ioerr_work); nvme_fc_error_recovery(ctrl, 
"transport detected io error"); } /* * nvme_fc_io_getuuid - Routine called to get the appid field * associated with request by the lldd * @req:IO request from nvme fc to driver * Returns: UUID if there is an appid associated with VM or * NULL if the user/libvirt has not set the appid to VM */ char *nvme_fc_io_getuuid(struct nvmefc_fcp_req *req) { struct nvme_fc_fcp_op *op = fcp_req_to_fcp_op(req); struct request *rq = op->rq; if (!IS_ENABLED(CONFIG_BLK_CGROUP_FC_APPID) || !rq || !rq->bio) return NULL; return blkcg_get_fc_appid(rq->bio); } EXPORT_SYMBOL_GPL(nvme_fc_io_getuuid); static void nvme_fc_fcpio_done(struct nvmefc_fcp_req *req) { struct nvme_fc_fcp_op *op = fcp_req_to_fcp_op(req); struct request *rq = op->rq; struct nvmefc_fcp_req *freq = &op->fcp_req; struct nvme_fc_ctrl *ctrl = op->ctrl; struct nvme_fc_queue *queue = op->queue; struct nvme_completion *cqe = &op->rsp_iu.cqe; struct nvme_command *sqe = &op->cmd_iu.sqe; __le16 status = cpu_to_le16(NVME_SC_SUCCESS << 1); union nvme_result result; bool terminate_assoc = true; int opstate; /* * WARNING: * The current linux implementation of a nvme controller * allocates a single tag set for all io queues and sizes * the io queues to fully hold all possible tags. Thus, the * implementation does not reference or care about the sqhd * value as it never needs to use the sqhd/sqtail pointers * for submission pacing. * * This affects the FC-NVME implementation in two ways: * 1) As the value doesn't matter, we don't need to waste * cycles extracting it from ERSPs and stamping it in the * cases where the transport fabricates CQEs on successful * completions. * 2) The FC-NVME implementation requires that delivery of * ERSP completions are to go back to the nvme layer in order * relative to the rsn, such that the sqhd value will always * be "in order" for the nvme layer. As the nvme layer in * linux doesn't care about sqhd, there's no need to return * them in order. * * Additionally: * As the core nvme layer in linux currently does not look at * every field in the cqe - in cases where the FC transport must * fabricate a CQE, the following fields will not be set as they * are not referenced: * cqe.sqid, cqe.sqhd, cqe.command_id * * Failure or error of an individual i/o, in a transport * detected fashion unrelated to the nvme completion status, * potentially cause the initiator and target sides to get out * of sync on SQ head/tail (aka outstanding io count allowed). * Per FC-NVME spec, failure of an individual command requires * the connection to be terminated, which in turn requires the * association to be terminated. */ opstate = atomic_xchg(&op->state, FCPOP_STATE_COMPLETE); fc_dma_sync_single_for_cpu(ctrl->lport->dev, op->fcp_req.rspdma, sizeof(op->rsp_iu), DMA_FROM_DEVICE); if (opstate == FCPOP_STATE_ABORTED) status = cpu_to_le16(NVME_SC_HOST_ABORTED_CMD << 1); else if (freq->status) { status = cpu_to_le16(NVME_SC_HOST_PATH_ERROR << 1); dev_info(ctrl->ctrl.device, "NVME-FC{%d}: io failed due to lldd error %d\n", ctrl->cnum, freq->status); } /* * For the linux implementation, if we have an unsuccesful * status, they blk-mq layer can typically be called with the * non-zero status and the content of the cqe isn't important. */ if (status) goto done; /* * command completed successfully relative to the wire * protocol. However, validate anything received and * extract the status and result from the cqe (create it * where necessary). 
*/ switch (freq->rcv_rsplen) { case 0: case NVME_FC_SIZEOF_ZEROS_RSP: /* * No response payload or 12 bytes of payload (which * should all be zeros) are considered successful and * no payload in the CQE by the transport. */ if (freq->transferred_length != be32_to_cpu(op->cmd_iu.data_len)) { status = cpu_to_le16(NVME_SC_HOST_PATH_ERROR << 1); dev_info(ctrl->ctrl.device, "NVME-FC{%d}: io failed due to bad transfer " "length: %d vs expected %d\n", ctrl->cnum, freq->transferred_length, be32_to_cpu(op->cmd_iu.data_len)); goto done; } result.u64 = 0; break; case sizeof(struct nvme_fc_ersp_iu): /* * The ERSP IU contains a full completion with CQE. * Validate ERSP IU and look at cqe. */ if (unlikely(be16_to_cpu(op->rsp_iu.iu_len) != (freq->rcv_rsplen / 4) || be32_to_cpu(op->rsp_iu.xfrd_len) != freq->transferred_length || op->rsp_iu.ersp_result || sqe->common.command_id != cqe->command_id)) { status = cpu_to_le16(NVME_SC_HOST_PATH_ERROR << 1); dev_info(ctrl->ctrl.device, "NVME-FC{%d}: io failed due to bad NVMe_ERSP: " "iu len %d, xfr len %d vs %d, status code " "%d, cmdid %d vs %d\n", ctrl->cnum, be16_to_cpu(op->rsp_iu.iu_len), be32_to_cpu(op->rsp_iu.xfrd_len), freq->transferred_length, op->rsp_iu.ersp_result, sqe->common.command_id, cqe->command_id); goto done; } result = cqe->result; status = cqe->status; break; default: status = cpu_to_le16(NVME_SC_HOST_PATH_ERROR << 1); dev_info(ctrl->ctrl.device, "NVME-FC{%d}: io failed due to odd NVMe_xRSP iu " "len %d\n", ctrl->cnum, freq->rcv_rsplen); goto done; } terminate_assoc = false; done: if (op->flags & FCOP_FLAGS_AEN) { nvme_complete_async_event(&queue->ctrl->ctrl, status, &result); __nvme_fc_fcpop_chk_teardowns(ctrl, op, opstate); atomic_set(&op->state, FCPOP_STATE_IDLE); op->flags = FCOP_FLAGS_AEN; /* clear other flags */ nvme_fc_ctrl_put(ctrl); goto check_error; } __nvme_fc_fcpop_chk_teardowns(ctrl, op, opstate); if (!nvme_try_complete_req(rq, status, result)) nvme_fc_complete_rq(rq); check_error: if (terminate_assoc && ctrl->ctrl.state != NVME_CTRL_RESETTING) queue_work(nvme_reset_wq, &ctrl->ioerr_work); } static int __nvme_fc_init_request(struct nvme_fc_ctrl *ctrl, struct nvme_fc_queue *queue, struct nvme_fc_fcp_op *op, struct request *rq, u32 rqno) { struct nvme_fcp_op_w_sgl *op_w_sgl = container_of(op, typeof(*op_w_sgl), op); struct nvme_fc_cmd_iu *cmdiu = &op->cmd_iu; int ret = 0; memset(op, 0, sizeof(*op)); op->fcp_req.cmdaddr = &op->cmd_iu; op->fcp_req.cmdlen = sizeof(op->cmd_iu); op->fcp_req.rspaddr = &op->rsp_iu; op->fcp_req.rsplen = sizeof(op->rsp_iu); op->fcp_req.done = nvme_fc_fcpio_done; op->ctrl = ctrl; op->queue = queue; op->rq = rq; op->rqno = rqno; cmdiu->format_id = NVME_CMD_FORMAT_ID; cmdiu->fc_id = NVME_CMD_FC_ID; cmdiu->iu_len = cpu_to_be16(sizeof(*cmdiu) / sizeof(u32)); if (queue->qnum) cmdiu->rsv_cat = fccmnd_set_cat_css(0, (NVME_CC_CSS_NVM >> NVME_CC_CSS_SHIFT)); else cmdiu->rsv_cat = fccmnd_set_cat_admin(0); op->fcp_req.cmddma = fc_dma_map_single(ctrl->lport->dev, &op->cmd_iu, sizeof(op->cmd_iu), DMA_TO_DEVICE); if (fc_dma_mapping_error(ctrl->lport->dev, op->fcp_req.cmddma)) { dev_err(ctrl->dev, "FCP Op failed - cmdiu dma mapping failed.\n"); ret = -EFAULT; goto out_on_error; } op->fcp_req.rspdma = fc_dma_map_single(ctrl->lport->dev, &op->rsp_iu, sizeof(op->rsp_iu), DMA_FROM_DEVICE); if (fc_dma_mapping_error(ctrl->lport->dev, op->fcp_req.rspdma)) { dev_err(ctrl->dev, "FCP Op failed - rspiu dma mapping failed.\n"); ret = -EFAULT; } atomic_set(&op->state, FCPOP_STATE_IDLE); out_on_error: return ret; } static int 
nvme_fc_init_request(struct blk_mq_tag_set *set, struct request *rq, unsigned int hctx_idx, unsigned int numa_node) { struct nvme_fc_ctrl *ctrl = to_fc_ctrl(set->driver_data); struct nvme_fcp_op_w_sgl *op = blk_mq_rq_to_pdu(rq); int queue_idx = (set == &ctrl->tag_set) ? hctx_idx + 1 : 0; struct nvme_fc_queue *queue = &ctrl->queues[queue_idx]; int res; res = __nvme_fc_init_request(ctrl, queue, &op->op, rq, queue->rqcnt++); if (res) return res; op->op.fcp_req.first_sgl = op->sgl; op->op.fcp_req.private = &op->priv[0]; nvme_req(rq)->ctrl = &ctrl->ctrl; nvme_req(rq)->cmd = &op->op.cmd_iu.sqe; return res; } static int nvme_fc_init_aen_ops(struct nvme_fc_ctrl *ctrl) { struct nvme_fc_fcp_op *aen_op; struct nvme_fc_cmd_iu *cmdiu; struct nvme_command *sqe; void *private = NULL; int i, ret; aen_op = ctrl->aen_ops; for (i = 0; i < NVME_NR_AEN_COMMANDS; i++, aen_op++) { if (ctrl->lport->ops->fcprqst_priv_sz) { private = kzalloc(ctrl->lport->ops->fcprqst_priv_sz, GFP_KERNEL); if (!private) return -ENOMEM; } cmdiu = &aen_op->cmd_iu; sqe = &cmdiu->sqe; ret = __nvme_fc_init_request(ctrl, &ctrl->queues[0], aen_op, (struct request *)NULL, (NVME_AQ_BLK_MQ_DEPTH + i)); if (ret) { kfree(private); return ret; } aen_op->flags = FCOP_FLAGS_AEN; aen_op->fcp_req.private = private; memset(sqe, 0, sizeof(*sqe)); sqe->common.opcode = nvme_admin_async_event; /* Note: core layer may overwrite the sqe.command_id value */ sqe->common.command_id = NVME_AQ_BLK_MQ_DEPTH + i; } return 0; } static void nvme_fc_term_aen_ops(struct nvme_fc_ctrl *ctrl) { struct nvme_fc_fcp_op *aen_op; int i; cancel_work_sync(&ctrl->ctrl.async_event_work); aen_op = ctrl->aen_ops; for (i = 0; i < NVME_NR_AEN_COMMANDS; i++, aen_op++) { __nvme_fc_exit_request(ctrl, aen_op); kfree(aen_op->fcp_req.private); aen_op->fcp_req.private = NULL; } } static inline int __nvme_fc_init_hctx(struct blk_mq_hw_ctx *hctx, void *data, unsigned int qidx) { struct nvme_fc_ctrl *ctrl = to_fc_ctrl(data); struct nvme_fc_queue *queue = &ctrl->queues[qidx]; hctx->driver_data = queue; queue->hctx = hctx; return 0; } static int nvme_fc_init_hctx(struct blk_mq_hw_ctx *hctx, void *data, unsigned int hctx_idx) { return __nvme_fc_init_hctx(hctx, data, hctx_idx + 1); } static int nvme_fc_init_admin_hctx(struct blk_mq_hw_ctx *hctx, void *data, unsigned int hctx_idx) { return __nvme_fc_init_hctx(hctx, data, hctx_idx); } static void nvme_fc_init_queue(struct nvme_fc_ctrl *ctrl, int idx) { struct nvme_fc_queue *queue; queue = &ctrl->queues[idx]; memset(queue, 0, sizeof(*queue)); queue->ctrl = ctrl; queue->qnum = idx; atomic_set(&queue->csn, 0); queue->dev = ctrl->dev; if (idx > 0) queue->cmnd_capsule_len = ctrl->ctrl.ioccsz * 16; else queue->cmnd_capsule_len = sizeof(struct nvme_command); /* * Considered whether we should allocate buffers for all SQEs * and CQEs and dma map them - mapping their respective entries * into the request structures (kernel vm addr and dma address) * thus the driver could use the buffers/mappings directly. * It only makes sense if the LLDD would use them for its * messaging api. It's very unlikely most adapter api's would use * a native NVME sqe/cqe. More reasonable if FC-NVME IU payload * structures were used instead. */ } /* * This routine terminates a queue at the transport level. * The transport has already ensured that all outstanding ios on * the queue have been terminated. * The transport will send a Disconnect LS request to terminate * the queue's connection. Termination of the admin queue will also * terminate the association at the target. 
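 * (In the current implementation no per-queue Disconnect LS is actually
 * sent; see nvme_fc_free_queue() below - the whole association is always
 * terminated instead, which implicitly tears down the queue connections.)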
*/ static void nvme_fc_free_queue(struct nvme_fc_queue *queue) { if (!test_and_clear_bit(NVME_FC_Q_CONNECTED, &queue->flags)) return; clear_bit(NVME_FC_Q_LIVE, &queue->flags); /* * Current implementation never disconnects a single queue. * It always terminates a whole association. So there is never * a disconnect(queue) LS sent to the target. */ queue->connection_id = 0; atomic_set(&queue->csn, 0); } static void __nvme_fc_delete_hw_queue(struct nvme_fc_ctrl *ctrl, struct nvme_fc_queue *queue, unsigned int qidx) { if (ctrl->lport->ops->delete_queue) ctrl->lport->ops->delete_queue(&ctrl->lport->localport, qidx, queue->lldd_handle); queue->lldd_handle = NULL; } static void nvme_fc_free_io_queues(struct nvme_fc_ctrl *ctrl) { int i; for (i = 1; i < ctrl->ctrl.queue_count; i++) nvme_fc_free_queue(&ctrl->queues[i]); } static int __nvme_fc_create_hw_queue(struct nvme_fc_ctrl *ctrl, struct nvme_fc_queue *queue, unsigned int qidx, u16 qsize) { int ret = 0; queue->lldd_handle = NULL; if (ctrl->lport->ops->create_queue) ret = ctrl->lport->ops->create_queue(&ctrl->lport->localport, qidx, qsize, &queue->lldd_handle); return ret; } static void nvme_fc_delete_hw_io_queues(struct nvme_fc_ctrl *ctrl) { struct nvme_fc_queue *queue = &ctrl->queues[ctrl->ctrl.queue_count - 1]; int i; for (i = ctrl->ctrl.queue_count - 1; i >= 1; i--, queue--) __nvme_fc_delete_hw_queue(ctrl, queue, i); } static int nvme_fc_create_hw_io_queues(struct nvme_fc_ctrl *ctrl, u16 qsize) { struct nvme_fc_queue *queue = &ctrl->queues[1]; int i, ret; for (i = 1; i < ctrl->ctrl.queue_count; i++, queue++) { ret = __nvme_fc_create_hw_queue(ctrl, queue, i, qsize); if (ret) goto delete_queues; } return 0; delete_queues: for (; i > 0; i--) __nvme_fc_delete_hw_queue(ctrl, &ctrl->queues[i], i); return ret; } static int nvme_fc_connect_io_queues(struct nvme_fc_ctrl *ctrl, u16 qsize) { int i, ret = 0; for (i = 1; i < ctrl->ctrl.queue_count; i++) { ret = nvme_fc_connect_queue(ctrl, &ctrl->queues[i], qsize, (qsize / 5)); if (ret) break; ret = nvmf_connect_io_queue(&ctrl->ctrl, i); if (ret) break; set_bit(NVME_FC_Q_LIVE, &ctrl->queues[i].flags); } return ret; } static void nvme_fc_init_io_queues(struct nvme_fc_ctrl *ctrl) { int i; for (i = 1; i < ctrl->ctrl.queue_count; i++) nvme_fc_init_queue(ctrl, i); } static void nvme_fc_ctrl_free(struct kref *ref) { struct nvme_fc_ctrl *ctrl = container_of(ref, struct nvme_fc_ctrl, ref); unsigned long flags; if (ctrl->ctrl.tagset) nvme_remove_io_tag_set(&ctrl->ctrl); /* remove from rport list */ spin_lock_irqsave(&ctrl->rport->lock, flags); list_del(&ctrl->ctrl_list); spin_unlock_irqrestore(&ctrl->rport->lock, flags); nvme_unquiesce_admin_queue(&ctrl->ctrl); nvme_remove_admin_tag_set(&ctrl->ctrl); kfree(ctrl->queues); put_device(ctrl->dev); nvme_fc_rport_put(ctrl->rport); ida_free(&nvme_fc_ctrl_cnt, ctrl->cnum); if (ctrl->ctrl.opts) nvmf_free_options(ctrl->ctrl.opts); kfree(ctrl); } static void nvme_fc_ctrl_put(struct nvme_fc_ctrl *ctrl) { kref_put(&ctrl->ref, nvme_fc_ctrl_free); } static int nvme_fc_ctrl_get(struct nvme_fc_ctrl *ctrl) { return kref_get_unless_zero(&ctrl->ref); } /* * All accesses from nvme core layer done - can now free the * controller. Called after last nvme_put_ctrl() call */ static void nvme_fc_nvme_ctrl_freed(struct nvme_ctrl *nctrl) { struct nvme_fc_ctrl *ctrl = to_fc_ctrl(nctrl); WARN_ON(nctrl != &ctrl->ctrl); nvme_fc_ctrl_put(ctrl); } /* * This routine is used by the transport when it needs to find active * io on a queue that is to be terminated. 
The transport uses * blk_mq_tagset_busy_iter() to find the busy requests, which then invoke * this routine to kill them on a 1 by 1 basis. * * As FC allocates FC exchange for each io, the transport must contact * the LLDD to terminate the exchange, thus releasing the FC exchange. * After terminating the exchange the LLDD will call the transport's * normal io done path for the request, but it will have an aborted * status. The done path will return the io request back to the block * layer with an error status. */ static bool nvme_fc_terminate_exchange(struct request *req, void *data) { struct nvme_ctrl *nctrl = data; struct nvme_fc_ctrl *ctrl = to_fc_ctrl(nctrl); struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(req); op->nreq.flags |= NVME_REQ_CANCELLED; __nvme_fc_abort_op(ctrl, op); return true; } /* * This routine runs through all outstanding commands on the association * and aborts them. This routine is typically called by the * delete_association routine. It is also called due to an error during * reconnect. In that scenario, it is most likely a command that initializes * the controller, including fabric Connect commands on io queues, that * may have timed out or failed, thus the io must be killed for the connect * thread to see the error. */ static void __nvme_fc_abort_outstanding_ios(struct nvme_fc_ctrl *ctrl, bool start_queues) { int q; /* * if aborting io, the queues are no longer good, mark them * all as not live. */ if (ctrl->ctrl.queue_count > 1) { for (q = 1; q < ctrl->ctrl.queue_count; q++) clear_bit(NVME_FC_Q_LIVE, &ctrl->queues[q].flags); } clear_bit(NVME_FC_Q_LIVE, &ctrl->queues[0].flags); /* * If io queues are present, stop them and terminate all outstanding * ios on them. As FC allocates FC exchange for each io, the * transport must contact the LLDD to terminate the exchange, * thus releasing the FC exchange. We use blk_mq_tagset_busy_iter() * to tell us what ios are busy and invoke a transport routine * to kill them with the LLDD. After terminating the exchange * the LLDD will call the transport's normal io done path, but it * will have an aborted status. The done path will return the * io requests back to the block layer as part of normal completions * (but with error status). */ if (ctrl->ctrl.queue_count > 1) { nvme_quiesce_io_queues(&ctrl->ctrl); nvme_sync_io_queues(&ctrl->ctrl); blk_mq_tagset_busy_iter(&ctrl->tag_set, nvme_fc_terminate_exchange, &ctrl->ctrl); blk_mq_tagset_wait_completed_request(&ctrl->tag_set); if (start_queues) nvme_unquiesce_io_queues(&ctrl->ctrl); } /* * Other transports, which don't have link-level contexts bound * to sqe's, would try to gracefully shutdown the controller by * writing the registers for shutdown and polling (call * nvme_disable_ctrl()). Given a bunch of i/o was potentially * just aborted and we will wait on those contexts, and given * there was no indication of how live the controller is on the * link, don't send more io to create more contexts for the * shutdown. Let the controller fail via keepalive failure if * it's still present. */ /* * clean up the admin queue. Same thing as above.
*/ nvme_quiesce_admin_queue(&ctrl->ctrl); blk_sync_queue(ctrl->ctrl.admin_q); blk_mq_tagset_busy_iter(&ctrl->admin_tag_set, nvme_fc_terminate_exchange, &ctrl->ctrl); blk_mq_tagset_wait_completed_request(&ctrl->admin_tag_set); if (start_queues) nvme_unquiesce_admin_queue(&ctrl->ctrl); } static void nvme_fc_error_recovery(struct nvme_fc_ctrl *ctrl, char *errmsg) { /* * if an error (io timeout, etc) while (re)connecting, the remote * port requested terminating of the association (disconnect_ls) * or an error (timeout or abort) occurred on an io while creating * the controller. Abort any ios on the association and let the * create_association error path resolve things. */ enum nvme_ctrl_state state; unsigned long flags; spin_lock_irqsave(&ctrl->lock, flags); state = ctrl->ctrl.state; if (state == NVME_CTRL_CONNECTING) { set_bit(ASSOC_FAILED, &ctrl->flags); spin_unlock_irqrestore(&ctrl->lock, flags); __nvme_fc_abort_outstanding_ios(ctrl, true); dev_warn(ctrl->ctrl.device, "NVME-FC{%d}: transport error during (re)connect\n", ctrl->cnum); return; } spin_unlock_irqrestore(&ctrl->lock, flags); /* Otherwise, only proceed if in LIVE state - e.g. on first error */ if (state != NVME_CTRL_LIVE) return; dev_warn(ctrl->ctrl.device, "NVME-FC{%d}: transport association event: %s\n", ctrl->cnum, errmsg); dev_warn(ctrl->ctrl.device, "NVME-FC{%d}: resetting controller\n", ctrl->cnum); nvme_reset_ctrl(&ctrl->ctrl); } static enum blk_eh_timer_return nvme_fc_timeout(struct request *rq) { struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(rq); struct nvme_fc_ctrl *ctrl = op->ctrl; struct nvme_fc_cmd_iu *cmdiu = &op->cmd_iu; struct nvme_command *sqe = &cmdiu->sqe; /* * Attempt to abort the offending command. Command completion * will detect the aborted io and will fail the connection. */ dev_info(ctrl->ctrl.device, "NVME-FC{%d.%d}: io timeout: opcode %d fctype %d w10/11: " "x%08x/x%08x\n", ctrl->cnum, op->queue->qnum, sqe->common.opcode, sqe->connect.fctype, sqe->common.cdw10, sqe->common.cdw11); if (__nvme_fc_abort_op(ctrl, op)) nvme_fc_error_recovery(ctrl, "io timeout abort failed"); /* * the io abort has been initiated. Have the reset timer * restarted and the abort completion will complete the io * shortly. Avoids a synchronous wait while the abort finishes. */ return BLK_EH_RESET_TIMER; } static int nvme_fc_map_data(struct nvme_fc_ctrl *ctrl, struct request *rq, struct nvme_fc_fcp_op *op) { struct nvmefc_fcp_req *freq = &op->fcp_req; int ret; freq->sg_cnt = 0; if (!blk_rq_nr_phys_segments(rq)) return 0; freq->sg_table.sgl = freq->first_sgl; ret = sg_alloc_table_chained(&freq->sg_table, blk_rq_nr_phys_segments(rq), freq->sg_table.sgl, NVME_INLINE_SG_CNT); if (ret) return -ENOMEM; op->nents = blk_rq_map_sg(rq->q, rq, freq->sg_table.sgl); WARN_ON(op->nents > blk_rq_nr_phys_segments(rq)); freq->sg_cnt = fc_dma_map_sg(ctrl->lport->dev, freq->sg_table.sgl, op->nents, rq_dma_dir(rq)); if (unlikely(freq->sg_cnt <= 0)) { sg_free_table_chained(&freq->sg_table, NVME_INLINE_SG_CNT); freq->sg_cnt = 0; return -EFAULT; } /* * TODO: blk_integrity_rq(rq) for DIF */ return 0; } static void nvme_fc_unmap_data(struct nvme_fc_ctrl *ctrl, struct request *rq, struct nvme_fc_fcp_op *op) { struct nvmefc_fcp_req *freq = &op->fcp_req; if (!freq->sg_cnt) return; fc_dma_unmap_sg(ctrl->lport->dev, freq->sg_table.sgl, op->nents, rq_dma_dir(rq)); sg_free_table_chained(&freq->sg_table, NVME_INLINE_SG_CNT); freq->sg_cnt = 0; } /* * In FC, the queue is a logical thing. 
At transport connect, the target * creates its "queue" and returns a handle that is to be given to the * target whenever it posts something to the corresponding SQ. When an * SQE is sent on a SQ, FC effectively considers the SQE, or rather the * command contained within the SQE, an io, and assigns a FC exchange * to it. The SQE and the associated SQ handle are sent in the initial * CMD IU sent on the exchange. All transfers relative to the io occur * as part of the exchange. The CQE is the last thing for the io, * which is transferred (explicitly or implicitly) with the RSP IU * sent on the exchange. After the CQE is received, the FC exchange is * terminated and the Exchange may be used on a different io. * * The transport to LLDD api has the transport making a request for a * new fcp io request to the LLDD. The LLDD then allocates a FC exchange * resource and transfers the command. The LLDD will then process all * steps to complete the io. Upon completion, the transport done routine * is called. * * So - while the operation is outstanding to the LLDD, there is a link * level FC exchange resource that is also outstanding. This must be * considered in all cleanup operations. */ static blk_status_t nvme_fc_start_fcp_op(struct nvme_fc_ctrl *ctrl, struct nvme_fc_queue *queue, struct nvme_fc_fcp_op *op, u32 data_len, enum nvmefc_fcp_datadir io_dir) { struct nvme_fc_cmd_iu *cmdiu = &op->cmd_iu; struct nvme_command *sqe = &cmdiu->sqe; int ret, opstate; /* * before attempting to send the io, check to see if we believe * the target device is present */ if (ctrl->rport->remoteport.port_state != FC_OBJSTATE_ONLINE) return BLK_STS_RESOURCE; if (!nvme_fc_ctrl_get(ctrl)) return BLK_STS_IOERR; /* format the FC-NVME CMD IU and fcp_req */ cmdiu->connection_id = cpu_to_be64(queue->connection_id); cmdiu->data_len = cpu_to_be32(data_len); switch (io_dir) { case NVMEFC_FCP_WRITE: cmdiu->flags = FCNVME_CMD_FLAGS_WRITE; break; case NVMEFC_FCP_READ: cmdiu->flags = FCNVME_CMD_FLAGS_READ; break; case NVMEFC_FCP_NODATA: cmdiu->flags = 0; break; } op->fcp_req.payload_length = data_len; op->fcp_req.io_dir = io_dir; op->fcp_req.transferred_length = 0; op->fcp_req.rcv_rsplen = 0; op->fcp_req.status = NVME_SC_SUCCESS; op->fcp_req.sqid = cpu_to_le16(queue->qnum); /* * validate per fabric rules, set fields mandated by fabric spec * as well as those by FC-NVME spec. */ WARN_ON_ONCE(sqe->common.metadata); sqe->common.flags |= NVME_CMD_SGL_METABUF; /* * format SQE DPTR field per FC-NVME rules: * type=0x5 Transport SGL Data Block Descriptor * subtype=0xA Transport-specific value * address=0 * length=length of the data series */ sqe->rw.dptr.sgl.type = (NVME_TRANSPORT_SGL_DATA_DESC << 4) | NVME_SGL_FMT_TRANSPORT_A; sqe->rw.dptr.sgl.length = cpu_to_le32(data_len); sqe->rw.dptr.sgl.addr = 0; if (!(op->flags & FCOP_FLAGS_AEN)) { ret = nvme_fc_map_data(ctrl, op->rq, op); if (ret < 0) { nvme_cleanup_cmd(op->rq); nvme_fc_ctrl_put(ctrl); if (ret == -ENOMEM || ret == -EAGAIN) return BLK_STS_RESOURCE; return BLK_STS_IOERR; } } fc_dma_sync_single_for_device(ctrl->lport->dev, op->fcp_req.cmddma, sizeof(op->cmd_iu), DMA_TO_DEVICE); atomic_set(&op->state, FCPOP_STATE_ACTIVE); if (!(op->flags & FCOP_FLAGS_AEN)) nvme_start_request(op->rq); cmdiu->csn = cpu_to_be32(atomic_inc_return(&queue->csn)); ret = ctrl->lport->ops->fcp_io(&ctrl->lport->localport, &ctrl->rport->remoteport, queue->lldd_handle, &op->fcp_req); if (ret) { /* * If the lld fails to send the command, is there an issue with * the csn value?
If the command that fails is the Connect, * no - as the connection won't be live. If it is a command * post-connect, it's possible a gap in csn may be created. * Does this matter? As Linux initiators don't send fused * commands, no. The gap would exist, but as there's nothing * that depends on csn order to be delivered on the target * side, it shouldn't hurt. It would be difficult for a * target to even detect the csn gap as it has no idea when the * cmd with the csn was supposed to arrive. */ opstate = atomic_xchg(&op->state, FCPOP_STATE_COMPLETE); __nvme_fc_fcpop_chk_teardowns(ctrl, op, opstate); if (!(op->flags & FCOP_FLAGS_AEN)) { nvme_fc_unmap_data(ctrl, op->rq, op); nvme_cleanup_cmd(op->rq); } nvme_fc_ctrl_put(ctrl); if (ctrl->rport->remoteport.port_state == FC_OBJSTATE_ONLINE && ret != -EBUSY) return BLK_STS_IOERR; return BLK_STS_RESOURCE; } return BLK_STS_OK; } static blk_status_t nvme_fc_queue_rq(struct blk_mq_hw_ctx *hctx, const struct blk_mq_queue_data *bd) { struct nvme_ns *ns = hctx->queue->queuedata; struct nvme_fc_queue *queue = hctx->driver_data; struct nvme_fc_ctrl *ctrl = queue->ctrl; struct request *rq = bd->rq; struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(rq); enum nvmefc_fcp_datadir io_dir; bool queue_ready = test_bit(NVME_FC_Q_LIVE, &queue->flags); u32 data_len; blk_status_t ret; if (ctrl->rport->remoteport.port_state != FC_OBJSTATE_ONLINE || !nvme_check_ready(&queue->ctrl->ctrl, rq, queue_ready)) return nvme_fail_nonready_command(&queue->ctrl->ctrl, rq); ret = nvme_setup_cmd(ns, rq); if (ret) return ret; /* * nvme core doesn't quite treat the rq opaquely. Commands such * as WRITE ZEROES will return a non-zero rq payload_bytes yet * there is no actual payload to be transferred. * To get it right, key data transmission on there being 1 or * more physical segments in the sg list. If there is no * physical segments, there is no payload. */ if (blk_rq_nr_phys_segments(rq)) { data_len = blk_rq_payload_bytes(rq); io_dir = ((rq_data_dir(rq) == WRITE) ? 
NVMEFC_FCP_WRITE : NVMEFC_FCP_READ); } else { data_len = 0; io_dir = NVMEFC_FCP_NODATA; } return nvme_fc_start_fcp_op(ctrl, queue, op, data_len, io_dir); } static void nvme_fc_submit_async_event(struct nvme_ctrl *arg) { struct nvme_fc_ctrl *ctrl = to_fc_ctrl(arg); struct nvme_fc_fcp_op *aen_op; blk_status_t ret; if (test_bit(FCCTRL_TERMIO, &ctrl->flags)) return; aen_op = &ctrl->aen_ops[0]; ret = nvme_fc_start_fcp_op(ctrl, aen_op->queue, aen_op, 0, NVMEFC_FCP_NODATA); if (ret) dev_err(ctrl->ctrl.device, "failed async event work\n"); } static void nvme_fc_complete_rq(struct request *rq) { struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(rq); struct nvme_fc_ctrl *ctrl = op->ctrl; atomic_set(&op->state, FCPOP_STATE_IDLE); op->flags &= ~FCOP_FLAGS_TERMIO; nvme_fc_unmap_data(ctrl, rq, op); nvme_complete_rq(rq); nvme_fc_ctrl_put(ctrl); } static void nvme_fc_map_queues(struct blk_mq_tag_set *set) { struct nvme_fc_ctrl *ctrl = to_fc_ctrl(set->driver_data); int i; for (i = 0; i < set->nr_maps; i++) { struct blk_mq_queue_map *map = &set->map[i]; if (!map->nr_queues) { WARN_ON(i == HCTX_TYPE_DEFAULT); continue; } /* Call LLDD map queue functionality if defined */ if (ctrl->lport->ops->map_queues) ctrl->lport->ops->map_queues(&ctrl->lport->localport, map); else blk_mq_map_queues(map); } } static const struct blk_mq_ops nvme_fc_mq_ops = { .queue_rq = nvme_fc_queue_rq, .complete = nvme_fc_complete_rq, .init_request = nvme_fc_init_request, .exit_request = nvme_fc_exit_request, .init_hctx = nvme_fc_init_hctx, .timeout = nvme_fc_timeout, .map_queues = nvme_fc_map_queues, }; static int nvme_fc_create_io_queues(struct nvme_fc_ctrl *ctrl) { struct nvmf_ctrl_options *opts = ctrl->ctrl.opts; unsigned int nr_io_queues; int ret; nr_io_queues = min(min(opts->nr_io_queues, num_online_cpus()), ctrl->lport->ops->max_hw_queues); ret = nvme_set_queue_count(&ctrl->ctrl, &nr_io_queues); if (ret) { dev_info(ctrl->ctrl.device, "set_queue_count failed: %d\n", ret); return ret; } ctrl->ctrl.queue_count = nr_io_queues + 1; if (!nr_io_queues) return 0; nvme_fc_init_io_queues(ctrl); ret = nvme_alloc_io_tag_set(&ctrl->ctrl, &ctrl->tag_set, &nvme_fc_mq_ops, 1, struct_size_t(struct nvme_fcp_op_w_sgl, priv, ctrl->lport->ops->fcprqst_priv_sz)); if (ret) return ret; ret = nvme_fc_create_hw_io_queues(ctrl, ctrl->ctrl.sqsize + 1); if (ret) goto out_cleanup_tagset; ret = nvme_fc_connect_io_queues(ctrl, ctrl->ctrl.sqsize + 1); if (ret) goto out_delete_hw_queues; ctrl->ioq_live = true; return 0; out_delete_hw_queues: nvme_fc_delete_hw_io_queues(ctrl); out_cleanup_tagset: nvme_remove_io_tag_set(&ctrl->ctrl); nvme_fc_free_io_queues(ctrl); /* force put free routine to ignore io queues */ ctrl->ctrl.tagset = NULL; return ret; } static int nvme_fc_recreate_io_queues(struct nvme_fc_ctrl *ctrl) { struct nvmf_ctrl_options *opts = ctrl->ctrl.opts; u32 prior_ioq_cnt = ctrl->ctrl.queue_count - 1; unsigned int nr_io_queues; int ret; nr_io_queues = min(min(opts->nr_io_queues, num_online_cpus()), ctrl->lport->ops->max_hw_queues); ret = nvme_set_queue_count(&ctrl->ctrl, &nr_io_queues); if (ret) { dev_info(ctrl->ctrl.device, "set_queue_count failed: %d\n", ret); return ret; } if (!nr_io_queues && prior_ioq_cnt) { dev_info(ctrl->ctrl.device, "Fail Reconnect: At least 1 io queue " "required (was %d)\n", prior_ioq_cnt); return -ENOSPC; } ctrl->ctrl.queue_count = nr_io_queues + 1; /* check for io queues existing */ if (ctrl->ctrl.queue_count == 1) return 0; if (prior_ioq_cnt != nr_io_queues) { dev_info(ctrl->ctrl.device, "reconnect: revising io queue count from 
%d to %d\n", prior_ioq_cnt, nr_io_queues); blk_mq_update_nr_hw_queues(&ctrl->tag_set, nr_io_queues); } ret = nvme_fc_create_hw_io_queues(ctrl, ctrl->ctrl.sqsize + 1); if (ret) goto out_free_io_queues; ret = nvme_fc_connect_io_queues(ctrl, ctrl->ctrl.sqsize + 1); if (ret) goto out_delete_hw_queues; return 0; out_delete_hw_queues: nvme_fc_delete_hw_io_queues(ctrl); out_free_io_queues: nvme_fc_free_io_queues(ctrl); return ret; } static void nvme_fc_rport_active_on_lport(struct nvme_fc_rport *rport) { struct nvme_fc_lport *lport = rport->lport; atomic_inc(&lport->act_rport_cnt); } static void nvme_fc_rport_inactive_on_lport(struct nvme_fc_rport *rport) { struct nvme_fc_lport *lport = rport->lport; u32 cnt; cnt = atomic_dec_return(&lport->act_rport_cnt); if (cnt == 0 && lport->localport.port_state == FC_OBJSTATE_DELETED) lport->ops->localport_delete(&lport->localport); } static int nvme_fc_ctlr_active_on_rport(struct nvme_fc_ctrl *ctrl) { struct nvme_fc_rport *rport = ctrl->rport; u32 cnt; if (test_and_set_bit(ASSOC_ACTIVE, &ctrl->flags)) return 1; cnt = atomic_inc_return(&rport->act_ctrl_cnt); if (cnt == 1) nvme_fc_rport_active_on_lport(rport); return 0; } static int nvme_fc_ctlr_inactive_on_rport(struct nvme_fc_ctrl *ctrl) { struct nvme_fc_rport *rport = ctrl->rport; struct nvme_fc_lport *lport = rport->lport; u32 cnt; /* clearing of ctrl->flags ASSOC_ACTIVE bit is in association delete */ cnt = atomic_dec_return(&rport->act_ctrl_cnt); if (cnt == 0) { if (rport->remoteport.port_state == FC_OBJSTATE_DELETED) lport->ops->remoteport_delete(&rport->remoteport); nvme_fc_rport_inactive_on_lport(rport); } return 0; } /* * This routine restarts the controller on the host side, and * on the link side, recreates the controller association. */ static int nvme_fc_create_association(struct nvme_fc_ctrl *ctrl) { struct nvmf_ctrl_options *opts = ctrl->ctrl.opts; struct nvmefc_ls_rcv_op *disls = NULL; unsigned long flags; int ret; bool changed; ++ctrl->ctrl.nr_reconnects; if (ctrl->rport->remoteport.port_state != FC_OBJSTATE_ONLINE) return -ENODEV; if (nvme_fc_ctlr_active_on_rport(ctrl)) return -ENOTUNIQ; dev_info(ctrl->ctrl.device, "NVME-FC{%d}: create association : host wwpn 0x%016llx " " rport wwpn 0x%016llx: NQN \"%s\"\n", ctrl->cnum, ctrl->lport->localport.port_name, ctrl->rport->remoteport.port_name, ctrl->ctrl.opts->subsysnqn); clear_bit(ASSOC_FAILED, &ctrl->flags); /* * Create the admin queue */ ret = __nvme_fc_create_hw_queue(ctrl, &ctrl->queues[0], 0, NVME_AQ_DEPTH); if (ret) goto out_free_queue; ret = nvme_fc_connect_admin_queue(ctrl, &ctrl->queues[0], NVME_AQ_DEPTH, (NVME_AQ_DEPTH / 4)); if (ret) goto out_delete_hw_queue; ret = nvmf_connect_admin_queue(&ctrl->ctrl); if (ret) goto out_disconnect_admin_queue; set_bit(NVME_FC_Q_LIVE, &ctrl->queues[0].flags); /* * Check controller capabilities * * todo:- add code to check if ctrl attributes changed from * prior connection values */ ret = nvme_enable_ctrl(&ctrl->ctrl); if (!ret && test_bit(ASSOC_FAILED, &ctrl->flags)) ret = -EIO; if (ret) goto out_disconnect_admin_queue; ctrl->ctrl.max_segments = ctrl->lport->ops->max_sgl_segments; ctrl->ctrl.max_hw_sectors = ctrl->ctrl.max_segments << (ilog2(SZ_4K) - 9); nvme_unquiesce_admin_queue(&ctrl->ctrl); ret = nvme_init_ctrl_finish(&ctrl->ctrl, false); if (!ret && test_bit(ASSOC_FAILED, &ctrl->flags)) ret = -EIO; if (ret) goto out_disconnect_admin_queue; /* sanity checks */ /* FC-NVME does not have other data in the capsule */ if (ctrl->ctrl.icdoff) { dev_err(ctrl->ctrl.device, "icdoff %d is not supported!\n", 
ctrl->ctrl.icdoff); ret = NVME_SC_INVALID_FIELD | NVME_SC_DNR; goto out_disconnect_admin_queue; } /* FC-NVME supports normal SGL Data Block Descriptors */ if (!nvme_ctrl_sgl_supported(&ctrl->ctrl)) { dev_err(ctrl->ctrl.device, "Mandatory sgls are not supported!\n"); ret = NVME_SC_INVALID_FIELD | NVME_SC_DNR; goto out_disconnect_admin_queue; } if (opts->queue_size > ctrl->ctrl.maxcmd) { /* warn if maxcmd is lower than queue_size */ dev_warn(ctrl->ctrl.device, "queue_size %zu > ctrl maxcmd %u, reducing " "to maxcmd\n", opts->queue_size, ctrl->ctrl.maxcmd); opts->queue_size = ctrl->ctrl.maxcmd; ctrl->ctrl.sqsize = opts->queue_size - 1; } ret = nvme_fc_init_aen_ops(ctrl); if (ret) goto out_term_aen_ops; /* * Create the io queues */ if (ctrl->ctrl.queue_count > 1) { if (!ctrl->ioq_live) ret = nvme_fc_create_io_queues(ctrl); else ret = nvme_fc_recreate_io_queues(ctrl); } spin_lock_irqsave(&ctrl->lock, flags); if (!ret && test_bit(ASSOC_FAILED, &ctrl->flags)) ret = -EIO; if (ret) { spin_unlock_irqrestore(&ctrl->lock, flags); goto out_term_aen_ops; } changed = nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_LIVE); spin_unlock_irqrestore(&ctrl->lock, flags); ctrl->ctrl.nr_reconnects = 0; if (changed) nvme_start_ctrl(&ctrl->ctrl); return 0; /* Success */ out_term_aen_ops: nvme_fc_term_aen_ops(ctrl); out_disconnect_admin_queue: dev_warn(ctrl->ctrl.device, "NVME-FC{%d}: create_assoc failed, assoc_id %llx ret %d\n", ctrl->cnum, ctrl->association_id, ret); /* send a Disconnect(association) LS to fc-nvme target */ nvme_fc_xmt_disconnect_assoc(ctrl); spin_lock_irqsave(&ctrl->lock, flags); ctrl->association_id = 0; disls = ctrl->rcv_disconn; ctrl->rcv_disconn = NULL; spin_unlock_irqrestore(&ctrl->lock, flags); if (disls) nvme_fc_xmt_ls_rsp(disls); out_delete_hw_queue: __nvme_fc_delete_hw_queue(ctrl, &ctrl->queues[0], 0); out_free_queue: nvme_fc_free_queue(&ctrl->queues[0]); clear_bit(ASSOC_ACTIVE, &ctrl->flags); nvme_fc_ctlr_inactive_on_rport(ctrl); return ret; } /* * This routine stops operation of the controller on the host side. * On the host os stack side: Admin and IO queues are stopped, * outstanding ios on them terminated via FC ABTS. * On the link side: the association is terminated. */ static void nvme_fc_delete_association(struct nvme_fc_ctrl *ctrl) { struct nvmefc_ls_rcv_op *disls = NULL; unsigned long flags; if (!test_and_clear_bit(ASSOC_ACTIVE, &ctrl->flags)) return; spin_lock_irqsave(&ctrl->lock, flags); set_bit(FCCTRL_TERMIO, &ctrl->flags); ctrl->iocnt = 0; spin_unlock_irqrestore(&ctrl->lock, flags); __nvme_fc_abort_outstanding_ios(ctrl, false); /* kill the aens as they are a separate path */ nvme_fc_abort_aen_ops(ctrl); /* wait for all io that had to be aborted */ spin_lock_irq(&ctrl->lock); wait_event_lock_irq(ctrl->ioabort_wait, ctrl->iocnt == 0, ctrl->lock); clear_bit(FCCTRL_TERMIO, &ctrl->flags); spin_unlock_irq(&ctrl->lock); nvme_fc_term_aen_ops(ctrl); /* * send a Disconnect(association) LS to fc-nvme target * Note: could have been sent at top of process, but * cleaner on link traffic if after the aborts complete. * Note: if association doesn't exist, association_id will be 0 */ if (ctrl->association_id) nvme_fc_xmt_disconnect_assoc(ctrl); spin_lock_irqsave(&ctrl->lock, flags); ctrl->association_id = 0; disls = ctrl->rcv_disconn; ctrl->rcv_disconn = NULL; spin_unlock_irqrestore(&ctrl->lock, flags); if (disls) /* * if a Disconnect Request was waiting for a response, send * now that all ABTS's have been issued (and are complete). 
*/ nvme_fc_xmt_ls_rsp(disls); if (ctrl->ctrl.tagset) { nvme_fc_delete_hw_io_queues(ctrl); nvme_fc_free_io_queues(ctrl); } __nvme_fc_delete_hw_queue(ctrl, &ctrl->queues[0], 0); nvme_fc_free_queue(&ctrl->queues[0]); /* re-enable the admin_q so anything new can fast fail */ nvme_unquiesce_admin_queue(&ctrl->ctrl); /* resume the io queues so that things will fast fail */ nvme_unquiesce_io_queues(&ctrl->ctrl); nvme_fc_ctlr_inactive_on_rport(ctrl); } static void nvme_fc_delete_ctrl(struct nvme_ctrl *nctrl) { struct nvme_fc_ctrl *ctrl = to_fc_ctrl(nctrl); cancel_work_sync(&ctrl->ioerr_work); cancel_delayed_work_sync(&ctrl->connect_work); /* * kill the association on the link side. this will block * waiting for io to terminate */ nvme_fc_delete_association(ctrl); } static void nvme_fc_reconnect_or_delete(struct nvme_fc_ctrl *ctrl, int status) { struct nvme_fc_rport *rport = ctrl->rport; struct nvme_fc_remote_port *portptr = &rport->remoteport; unsigned long recon_delay = ctrl->ctrl.opts->reconnect_delay * HZ; bool recon = true; if (ctrl->ctrl.state != NVME_CTRL_CONNECTING) return; if (portptr->port_state == FC_OBJSTATE_ONLINE) { dev_info(ctrl->ctrl.device, "NVME-FC{%d}: reset: Reconnect attempt failed (%d)\n", ctrl->cnum, status); if (status > 0 && (status & NVME_SC_DNR)) recon = false; } else if (time_after_eq(jiffies, rport->dev_loss_end)) recon = false; if (recon && nvmf_should_reconnect(&ctrl->ctrl)) { if (portptr->port_state == FC_OBJSTATE_ONLINE) dev_info(ctrl->ctrl.device, "NVME-FC{%d}: Reconnect attempt in %ld " "seconds\n", ctrl->cnum, recon_delay / HZ); else if (time_after(jiffies + recon_delay, rport->dev_loss_end)) recon_delay = rport->dev_loss_end - jiffies; queue_delayed_work(nvme_wq, &ctrl->connect_work, recon_delay); } else { if (portptr->port_state == FC_OBJSTATE_ONLINE) { if (status > 0 && (status & NVME_SC_DNR)) dev_warn(ctrl->ctrl.device, "NVME-FC{%d}: reconnect failure\n", ctrl->cnum); else dev_warn(ctrl->ctrl.device, "NVME-FC{%d}: Max reconnect attempts " "(%d) reached.\n", ctrl->cnum, ctrl->ctrl.nr_reconnects); } else dev_warn(ctrl->ctrl.device, "NVME-FC{%d}: dev_loss_tmo (%d) expired " "while waiting for remoteport connectivity.\n", ctrl->cnum, min_t(int, portptr->dev_loss_tmo, (ctrl->ctrl.opts->max_reconnects * ctrl->ctrl.opts->reconnect_delay))); WARN_ON(nvme_delete_ctrl(&ctrl->ctrl)); } } static void nvme_fc_reset_ctrl_work(struct work_struct *work) { struct nvme_fc_ctrl *ctrl = container_of(work, struct nvme_fc_ctrl, ctrl.reset_work); nvme_stop_ctrl(&ctrl->ctrl); /* will block while waiting for io to terminate */ nvme_fc_delete_association(ctrl); if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_CONNECTING)) dev_err(ctrl->ctrl.device, "NVME-FC{%d}: error_recovery: Couldn't change state " "to CONNECTING\n", ctrl->cnum); if (ctrl->rport->remoteport.port_state == FC_OBJSTATE_ONLINE) { if (!queue_delayed_work(nvme_wq, &ctrl->connect_work, 0)) { dev_err(ctrl->ctrl.device, "NVME-FC{%d}: failed to schedule connect " "after reset\n", ctrl->cnum); } else { flush_delayed_work(&ctrl->connect_work); } } else { nvme_fc_reconnect_or_delete(ctrl, -ENOTCONN); } } static const struct nvme_ctrl_ops nvme_fc_ctrl_ops = { .name = "fc", .module = THIS_MODULE, .flags = NVME_F_FABRICS, .reg_read32 = nvmf_reg_read32, .reg_read64 = nvmf_reg_read64, .reg_write32 = nvmf_reg_write32, .free_ctrl = nvme_fc_nvme_ctrl_freed, .submit_async_event = nvme_fc_submit_async_event, .delete_ctrl = nvme_fc_delete_ctrl, .get_address = nvmf_get_address, }; static void nvme_fc_connect_ctrl_work(struct work_struct
*work) { int ret; struct nvme_fc_ctrl *ctrl = container_of(to_delayed_work(work), struct nvme_fc_ctrl, connect_work); ret = nvme_fc_create_association(ctrl); if (ret) nvme_fc_reconnect_or_delete(ctrl, ret); else dev_info(ctrl->ctrl.device, "NVME-FC{%d}: controller connect complete\n", ctrl->cnum); } static const struct blk_mq_ops nvme_fc_admin_mq_ops = { .queue_rq = nvme_fc_queue_rq, .complete = nvme_fc_complete_rq, .init_request = nvme_fc_init_request, .exit_request = nvme_fc_exit_request, .init_hctx = nvme_fc_init_admin_hctx, .timeout = nvme_fc_timeout, }; /* * Fails a controller request if it matches an existing controller * (association) with the same tuple: * <Host NQN, Host ID, local FC port, remote FC port, SUBSYS NQN> * * The ports don't need to be compared as they are intrinsically * already matched by the port pointers supplied. */ static bool nvme_fc_existing_controller(struct nvme_fc_rport *rport, struct nvmf_ctrl_options *opts) { struct nvme_fc_ctrl *ctrl; unsigned long flags; bool found = false; spin_lock_irqsave(&rport->lock, flags); list_for_each_entry(ctrl, &rport->ctrl_list, ctrl_list) { found = nvmf_ctlr_matches_baseopts(&ctrl->ctrl, opts); if (found) break; } spin_unlock_irqrestore(&rport->lock, flags); return found; } static struct nvme_ctrl * nvme_fc_init_ctrl(struct device *dev, struct nvmf_ctrl_options *opts, struct nvme_fc_lport *lport, struct nvme_fc_rport *rport) { struct nvme_fc_ctrl *ctrl; unsigned long flags; int ret, idx, ctrl_loss_tmo; if (!(rport->remoteport.port_role & (FC_PORT_ROLE_NVME_DISCOVERY | FC_PORT_ROLE_NVME_TARGET))) { ret = -EBADR; goto out_fail; } if (!opts->duplicate_connect && nvme_fc_existing_controller(rport, opts)) { ret = -EALREADY; goto out_fail; } ctrl = kzalloc(sizeof(*ctrl), GFP_KERNEL); if (!ctrl) { ret = -ENOMEM; goto out_fail; } idx = ida_alloc(&nvme_fc_ctrl_cnt, GFP_KERNEL); if (idx < 0) { ret = -ENOSPC; goto out_free_ctrl; } /* * if ctrl_loss_tmo is being enforced and the default reconnect delay * is being used, change to a shorter reconnect delay for FC. */ if (opts->max_reconnects != -1 && opts->reconnect_delay == NVMF_DEF_RECONNECT_DELAY && opts->reconnect_delay > NVME_FC_DEFAULT_RECONNECT_TMO) { ctrl_loss_tmo = opts->max_reconnects * opts->reconnect_delay; opts->reconnect_delay = NVME_FC_DEFAULT_RECONNECT_TMO; opts->max_reconnects = DIV_ROUND_UP(ctrl_loss_tmo, opts->reconnect_delay); } ctrl->ctrl.opts = opts; ctrl->ctrl.nr_reconnects = 0; if (lport->dev) ctrl->ctrl.numa_node = dev_to_node(lport->dev); else ctrl->ctrl.numa_node = NUMA_NO_NODE; INIT_LIST_HEAD(&ctrl->ctrl_list); ctrl->lport = lport; ctrl->rport = rport; ctrl->dev = lport->dev; ctrl->cnum = idx; ctrl->ioq_live = false; init_waitqueue_head(&ctrl->ioabort_wait); get_device(ctrl->dev); kref_init(&ctrl->ref); INIT_WORK(&ctrl->ctrl.reset_work, nvme_fc_reset_ctrl_work); INIT_DELAYED_WORK(&ctrl->connect_work, nvme_fc_connect_ctrl_work); INIT_WORK(&ctrl->ioerr_work, nvme_fc_ctrl_ioerr_work); spin_lock_init(&ctrl->lock); /* io queue count */ ctrl->ctrl.queue_count = min_t(unsigned int, opts->nr_io_queues, lport->ops->max_hw_queues); ctrl->ctrl.queue_count++; /* +1 for admin queue */ ctrl->ctrl.sqsize = opts->queue_size - 1; ctrl->ctrl.kato = opts->kato; ctrl->ctrl.cntlid = 0xffff; ret = -ENOMEM; ctrl->queues = kcalloc(ctrl->ctrl.queue_count, sizeof(struct nvme_fc_queue), GFP_KERNEL); if (!ctrl->queues) goto out_free_ida; nvme_fc_init_queue(ctrl, 0); /* * Would have been nice to init io queues tag set as well. 
* However, we require interaction from the controller * for max io queue count before we can do so. * Defer this to the connect path. */ ret = nvme_init_ctrl(&ctrl->ctrl, dev, &nvme_fc_ctrl_ops, 0); if (ret) goto out_free_queues; /* at this point, teardown path changes to ref counting on nvme ctrl */ ret = nvme_alloc_admin_tag_set(&ctrl->ctrl, &ctrl->admin_tag_set, &nvme_fc_admin_mq_ops, struct_size_t(struct nvme_fcp_op_w_sgl, priv, ctrl->lport->ops->fcprqst_priv_sz)); if (ret) goto fail_ctrl; spin_lock_irqsave(&rport->lock, flags); list_add_tail(&ctrl->ctrl_list, &rport->ctrl_list); spin_unlock_irqrestore(&rport->lock, flags); if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_RESETTING) || !nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_CONNECTING)) { dev_err(ctrl->ctrl.device, "NVME-FC{%d}: failed to init ctrl state\n", ctrl->cnum); goto fail_ctrl; } if (!queue_delayed_work(nvme_wq, &ctrl->connect_work, 0)) { dev_err(ctrl->ctrl.device, "NVME-FC{%d}: failed to schedule initial connect\n", ctrl->cnum); goto fail_ctrl; } flush_delayed_work(&ctrl->connect_work); dev_info(ctrl->ctrl.device, "NVME-FC{%d}: new ctrl: NQN \"%s\"\n", ctrl->cnum, nvmf_ctrl_subsysnqn(&ctrl->ctrl)); return &ctrl->ctrl; fail_ctrl: nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_DELETING); cancel_work_sync(&ctrl->ioerr_work); cancel_work_sync(&ctrl->ctrl.reset_work); cancel_delayed_work_sync(&ctrl->connect_work); ctrl->ctrl.opts = NULL; /* initiate nvme ctrl ref counting teardown */ nvme_uninit_ctrl(&ctrl->ctrl); /* Remove core ctrl ref. */ nvme_put_ctrl(&ctrl->ctrl); /* as we're past the point where we transition to the ref * counting teardown path, if we return a bad pointer here, * the calling routine, thinking it's prior to the * transition, will do an rport put. Since the teardown * path also does a rport put, we do an extra get here to * so proper order/teardown happens. */ nvme_fc_rport_get(rport); return ERR_PTR(-EIO); out_free_queues: kfree(ctrl->queues); out_free_ida: put_device(ctrl->dev); ida_free(&nvme_fc_ctrl_cnt, ctrl->cnum); out_free_ctrl: kfree(ctrl); out_fail: /* exit via here doesn't follow ctlr ref points */ return ERR_PTR(ret); } struct nvmet_fc_traddr { u64 nn; u64 pn; }; static int __nvme_fc_parse_u64(substring_t *sstr, u64 *val) { u64 token64; if (match_u64(sstr, &token64)) return -EINVAL; *val = token64; return 0; } /* * This routine validates and extracts the WWN's from the TRADDR string. * As kernel parsers need the 0x to determine number base, universally * build string to parse with 0x prefix before parsing name strings. 
*/ static int nvme_fc_parse_traddr(struct nvmet_fc_traddr *traddr, char *buf, size_t blen) { char name[2 + NVME_FC_TRADDR_HEXNAMELEN + 1]; substring_t wwn = { name, &name[sizeof(name)-1] }; int nnoffset, pnoffset; /* validate if string is one of the 2 allowed formats */ if (strnlen(buf, blen) == NVME_FC_TRADDR_MAXLENGTH && !strncmp(buf, "nn-0x", NVME_FC_TRADDR_OXNNLEN) && !strncmp(&buf[NVME_FC_TRADDR_MAX_PN_OFFSET], "pn-0x", NVME_FC_TRADDR_OXNNLEN)) { nnoffset = NVME_FC_TRADDR_OXNNLEN; pnoffset = NVME_FC_TRADDR_MAX_PN_OFFSET + NVME_FC_TRADDR_OXNNLEN; } else if ((strnlen(buf, blen) == NVME_FC_TRADDR_MINLENGTH && !strncmp(buf, "nn-", NVME_FC_TRADDR_NNLEN) && !strncmp(&buf[NVME_FC_TRADDR_MIN_PN_OFFSET], "pn-", NVME_FC_TRADDR_NNLEN))) { nnoffset = NVME_FC_TRADDR_NNLEN; pnoffset = NVME_FC_TRADDR_MIN_PN_OFFSET + NVME_FC_TRADDR_NNLEN; } else goto out_einval; name[0] = '0'; name[1] = 'x'; name[2 + NVME_FC_TRADDR_HEXNAMELEN] = 0; memcpy(&name[2], &buf[nnoffset], NVME_FC_TRADDR_HEXNAMELEN); if (__nvme_fc_parse_u64(&wwn, &traddr->nn)) goto out_einval; memcpy(&name[2], &buf[pnoffset], NVME_FC_TRADDR_HEXNAMELEN); if (__nvme_fc_parse_u64(&wwn, &traddr->pn)) goto out_einval; return 0; out_einval: pr_warn("%s: bad traddr string\n", __func__); return -EINVAL; } static struct nvme_ctrl * nvme_fc_create_ctrl(struct device *dev, struct nvmf_ctrl_options *opts) { struct nvme_fc_lport *lport; struct nvme_fc_rport *rport; struct nvme_ctrl *ctrl; struct nvmet_fc_traddr laddr = { 0L, 0L }; struct nvmet_fc_traddr raddr = { 0L, 0L }; unsigned long flags; int ret; ret = nvme_fc_parse_traddr(&raddr, opts->traddr, NVMF_TRADDR_SIZE); if (ret || !raddr.nn || !raddr.pn) return ERR_PTR(-EINVAL); ret = nvme_fc_parse_traddr(&laddr, opts->host_traddr, NVMF_TRADDR_SIZE); if (ret || !laddr.nn || !laddr.pn) return ERR_PTR(-EINVAL); /* find the host and remote ports to connect together */ spin_lock_irqsave(&nvme_fc_lock, flags); list_for_each_entry(lport, &nvme_fc_lport_list, port_list) { if (lport->localport.node_name != laddr.nn || lport->localport.port_name != laddr.pn || lport->localport.port_state != FC_OBJSTATE_ONLINE) continue; list_for_each_entry(rport, &lport->endp_list, endp_list) { if (rport->remoteport.node_name != raddr.nn || rport->remoteport.port_name != raddr.pn || rport->remoteport.port_state != FC_OBJSTATE_ONLINE) continue; /* if fail to get reference fall through. Will error */ if (!nvme_fc_rport_get(rport)) break; spin_unlock_irqrestore(&nvme_fc_lock, flags); ctrl = nvme_fc_init_ctrl(dev, opts, lport, rport); if (IS_ERR(ctrl)) nvme_fc_rport_put(rport); return ctrl; } } spin_unlock_irqrestore(&nvme_fc_lock, flags); pr_warn("%s: %s - %s combination not found\n", __func__, opts->traddr, opts->host_traddr); return ERR_PTR(-ENOENT); } static struct nvmf_transport_ops nvme_fc_transport = { .name = "fc", .module = THIS_MODULE, .required_opts = NVMF_OPT_TRADDR | NVMF_OPT_HOST_TRADDR, .allowed_opts = NVMF_OPT_RECONNECT_DELAY | NVMF_OPT_CTRL_LOSS_TMO, .create_ctrl = nvme_fc_create_ctrl, }; /* Arbitrary successive failures max. 
With lots of subsystems could be high */ #define DISCOVERY_MAX_FAIL 20 static ssize_t nvme_fc_nvme_discovery_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { unsigned long flags; LIST_HEAD(local_disc_list); struct nvme_fc_lport *lport; struct nvme_fc_rport *rport; int failcnt = 0; spin_lock_irqsave(&nvme_fc_lock, flags); restart: list_for_each_entry(lport, &nvme_fc_lport_list, port_list) { list_for_each_entry(rport, &lport->endp_list, endp_list) { if (!nvme_fc_lport_get(lport)) continue; if (!nvme_fc_rport_get(rport)) { /* * This is a temporary condition. Upon restart * this rport will be gone from the list. * * Revert the lport put and retry. Anything * added to the list already will be skipped (as * they are no longer list_empty). Loops should * resume at rports that were not yet seen. */ nvme_fc_lport_put(lport); if (failcnt++ < DISCOVERY_MAX_FAIL) goto restart; pr_err("nvme_discovery: too many reference " "failures\n"); goto process_local_list; } if (list_empty(&rport->disc_list)) list_add_tail(&rport->disc_list, &local_disc_list); } } process_local_list: while (!list_empty(&local_disc_list)) { rport = list_first_entry(&local_disc_list, struct nvme_fc_rport, disc_list); list_del_init(&rport->disc_list); spin_unlock_irqrestore(&nvme_fc_lock, flags); lport = rport->lport; /* signal discovery. Won't hurt if it repeats */ nvme_fc_signal_discovery_scan(lport, rport); nvme_fc_rport_put(rport); nvme_fc_lport_put(lport); spin_lock_irqsave(&nvme_fc_lock, flags); } spin_unlock_irqrestore(&nvme_fc_lock, flags); return count; } static DEVICE_ATTR(nvme_discovery, 0200, NULL, nvme_fc_nvme_discovery_store); #ifdef CONFIG_BLK_CGROUP_FC_APPID /* Parse the cgroup id from a buf and return the length of cgrpid */ static int fc_parse_cgrpid(const char *buf, u64 *id) { char cgrp_id[16+1]; int cgrpid_len, j; memset(cgrp_id, 0x0, sizeof(cgrp_id)); for (cgrpid_len = 0, j = 0; cgrpid_len < 17; cgrpid_len++) { if (buf[cgrpid_len] != ':') cgrp_id[cgrpid_len] = buf[cgrpid_len]; else { j = 1; break; } } if (!j) return -EINVAL; if (kstrtou64(cgrp_id, 16, id) < 0) return -EINVAL; return cgrpid_len; } /* * Parse and update the appid in the blkcg associated with the cgroupid. 
*/ static ssize_t fc_appid_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { size_t orig_count = count; u64 cgrp_id; int appid_len = 0; int cgrpid_len = 0; char app_id[FC_APPID_LEN]; int ret = 0; if (buf[count-1] == '\n') count--; if ((count > (16+1+FC_APPID_LEN)) || (!strchr(buf, ':'))) return -EINVAL; cgrpid_len = fc_parse_cgrpid(buf, &cgrp_id); if (cgrpid_len < 0) return -EINVAL; appid_len = count - cgrpid_len - 1; if (appid_len > FC_APPID_LEN) return -EINVAL; memset(app_id, 0x0, sizeof(app_id)); memcpy(app_id, &buf[cgrpid_len+1], appid_len); ret = blkcg_set_fc_appid(app_id, cgrp_id, sizeof(app_id)); if (ret < 0) return ret; return orig_count; } static DEVICE_ATTR(appid_store, 0200, NULL, fc_appid_store); #endif /* CONFIG_BLK_CGROUP_FC_APPID */ static struct attribute *nvme_fc_attrs[] = { &dev_attr_nvme_discovery.attr, #ifdef CONFIG_BLK_CGROUP_FC_APPID &dev_attr_appid_store.attr, #endif NULL }; static const struct attribute_group nvme_fc_attr_group = { .attrs = nvme_fc_attrs, }; static const struct attribute_group *nvme_fc_attr_groups[] = { &nvme_fc_attr_group, NULL }; static struct class fc_class = { .name = "fc", .dev_groups = nvme_fc_attr_groups, }; static int __init nvme_fc_init_module(void) { int ret; nvme_fc_wq = alloc_workqueue("nvme_fc_wq", WQ_MEM_RECLAIM, 0); if (!nvme_fc_wq) return -ENOMEM; /* * NOTE: * It is expected that in the future the kernel will combine * the FC-isms that are currently under scsi and now being * added to by NVME into a new standalone FC class. The SCSI * and NVME protocols and their devices would be under this * new FC class. * * As we need something to post FC-specific udev events to, * specifically for nvme probe events, start by creating the * new device class. When the new standalone FC class is * put in place, this code will move to a more generic * location for the class. 
*/ ret = class_register(&fc_class); if (ret) { pr_err("couldn't register class fc\n"); goto out_destroy_wq; } /* * Create a device for the FC-centric udev events */ fc_udev_device = device_create(&fc_class, NULL, MKDEV(0, 0), NULL, "fc_udev_device"); if (IS_ERR(fc_udev_device)) { pr_err("couldn't create fc_udev device!\n"); ret = PTR_ERR(fc_udev_device); goto out_destroy_class; } ret = nvmf_register_transport(&nvme_fc_transport); if (ret) goto out_destroy_device; return 0; out_destroy_device: device_destroy(&fc_class, MKDEV(0, 0)); out_destroy_class: class_unregister(&fc_class); out_destroy_wq: destroy_workqueue(nvme_fc_wq); return ret; } static void nvme_fc_delete_controllers(struct nvme_fc_rport *rport) { struct nvme_fc_ctrl *ctrl; spin_lock(&rport->lock); list_for_each_entry(ctrl, &rport->ctrl_list, ctrl_list) { dev_warn(ctrl->ctrl.device, "NVME-FC{%d}: transport unloading: deleting ctrl\n", ctrl->cnum); nvme_delete_ctrl(&ctrl->ctrl); } spin_unlock(&rport->lock); } static void nvme_fc_cleanup_for_unload(void) { struct nvme_fc_lport *lport; struct nvme_fc_rport *rport; list_for_each_entry(lport, &nvme_fc_lport_list, port_list) { list_for_each_entry(rport, &lport->endp_list, endp_list) { nvme_fc_delete_controllers(rport); } } } static void __exit nvme_fc_exit_module(void) { unsigned long flags; bool need_cleanup = false; spin_lock_irqsave(&nvme_fc_lock, flags); nvme_fc_waiting_to_unload = true; if (!list_empty(&nvme_fc_lport_list)) { need_cleanup = true; nvme_fc_cleanup_for_unload(); } spin_unlock_irqrestore(&nvme_fc_lock, flags); if (need_cleanup) { pr_info("%s: waiting for ctlr deletes\n", __func__); wait_for_completion(&nvme_fc_unload_proceed); pr_info("%s: ctrl deletes complete\n", __func__); } nvmf_unregister_transport(&nvme_fc_transport); ida_destroy(&nvme_fc_local_port_cnt); ida_destroy(&nvme_fc_ctrl_cnt); device_destroy(&fc_class, MKDEV(0, 0)); class_unregister(&fc_class); destroy_workqueue(nvme_fc_wq); } module_init(nvme_fc_init_module); module_exit(nvme_fc_exit_module); MODULE_LICENSE("GPL v2");
linux-master
drivers/nvme/host/fc.c
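
nvme_fc_parse_traddr() in the file above accepts a port address in one of two fixed-width spellings, "nn-0x<16 hex digits>:pn-0x<16 hex digits>" or "nn-<16 hex digits>:pn-<16 hex digits>". The following is a minimal standalone userspace sketch of that parsing, not driver code: the WWN values are made up, and the simple prefix/length checks stand in for the kernel's fixed-offset validation.

/* Illustrative userspace sketch (not kernel code): parse the two traddr
 * spellings accepted by nvme_fc_parse_traddr(). WWN values are made up. */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* parse "<prefix><16 hex digits>" into *val; return 0 on success */
static int parse_wwn(const char *tok, const char *prefix, uint64_t *val)
{
	size_t plen = strlen(prefix);
	char *end;

	if (strncmp(tok, prefix, plen))
		return -1;
	*val = strtoull(tok + plen, &end, 16);
	return (end - (tok + plen) == 16 && *end == '\0') ? 0 : -1;
}

int main(void)
{
	/* example address as it might appear in a "traddr=" option */
	char buf[] = "nn-0x20000090fa942779:pn-0x10000090fa942779";
	char *pn = strchr(buf, ':');
	uint64_t wwnn, wwpn;

	if (!pn)
		return 1;
	*pn++ = '\0';	/* split into the "nn-..." and "pn-..." halves */

	/* try the "0x"-prefixed spelling first, then the bare-hex one */
	if (parse_wwn(buf, "nn-0x", &wwnn) && parse_wwn(buf, "nn-", &wwnn))
		return 1;
	if (parse_wwn(pn, "pn-0x", &wwpn) && parse_wwn(pn, "pn-", &wwpn))
		return 1;

	printf("node_name=0x%016llx port_name=0x%016llx\n",
	       (unsigned long long)wwnn, (unsigned long long)wwpn);
	return 0;
}

Compiled on its own, this prints node_name=0x20000090fa942779 port_name=0x10000090fa942779 for the sample string.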
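
Both nvme_fc_init_ctrl() above and nvmf_parse_options() in the fabrics code that follows derive the reconnect budget with the same arithmetic, max_reconnects = DIV_ROUND_UP(ctrl_loss_tmo, reconnect_delay). A tiny self-contained sketch of that relation is below; the sample timeout values are illustrative, not taken from the source.

/* Illustrative sketch of the reconnect-budget arithmetic; the sample values
 * are made up and only the DIV_ROUND_UP relation mirrors the driver. */
#include <stdio.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

int main(void)
{
	int ctrl_loss_tmo = 600;	/* seconds a controller may stay unreachable */
	int reconnect_delay = 10;	/* seconds between reconnect attempts */
	int max_reconnects = DIV_ROUND_UP(ctrl_loss_tmo, reconnect_delay);

	/* 600s of tolerance at one attempt every 10s -> 60 attempts */
	printf("max_reconnects = %d\n", max_reconnects);
	return 0;
}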
// SPDX-License-Identifier: GPL-2.0 /* * NVMe over Fabrics common host code. * Copyright (c) 2015-2016 HGST, a Western Digital Company. */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/init.h> #include <linux/miscdevice.h> #include <linux/module.h> #include <linux/mutex.h> #include <linux/parser.h> #include <linux/seq_file.h> #include "nvme.h" #include "fabrics.h" static LIST_HEAD(nvmf_transports); static DECLARE_RWSEM(nvmf_transports_rwsem); static LIST_HEAD(nvmf_hosts); static DEFINE_MUTEX(nvmf_hosts_mutex); static struct nvmf_host *nvmf_default_host; static struct nvmf_host *nvmf_host_alloc(const char *hostnqn, uuid_t *id) { struct nvmf_host *host; host = kmalloc(sizeof(*host), GFP_KERNEL); if (!host) return NULL; kref_init(&host->ref); uuid_copy(&host->id, id); strscpy(host->nqn, hostnqn, NVMF_NQN_SIZE); return host; } static struct nvmf_host *nvmf_host_add(const char *hostnqn, uuid_t *id) { struct nvmf_host *host; mutex_lock(&nvmf_hosts_mutex); /* * We have defined a host as how it is perceived by the target. * Therefore, we don't allow different Host NQNs with the same Host ID. * Similarly, we do not allow the usage of the same Host NQN with * different Host IDs. This'll maintain unambiguous host identification. */ list_for_each_entry(host, &nvmf_hosts, list) { bool same_hostnqn = !strcmp(host->nqn, hostnqn); bool same_hostid = uuid_equal(&host->id, id); if (same_hostnqn && same_hostid) { kref_get(&host->ref); goto out_unlock; } if (same_hostnqn) { pr_err("found same hostnqn %s but different hostid %pUb\n", hostnqn, id); host = ERR_PTR(-EINVAL); goto out_unlock; } if (same_hostid) { pr_err("found same hostid %pUb but different hostnqn %s\n", id, hostnqn); host = ERR_PTR(-EINVAL); goto out_unlock; } } host = nvmf_host_alloc(hostnqn, id); if (!host) { host = ERR_PTR(-ENOMEM); goto out_unlock; } list_add_tail(&host->list, &nvmf_hosts); out_unlock: mutex_unlock(&nvmf_hosts_mutex); return host; } static struct nvmf_host *nvmf_host_default(void) { struct nvmf_host *host; char nqn[NVMF_NQN_SIZE]; uuid_t id; uuid_gen(&id); snprintf(nqn, NVMF_NQN_SIZE, "nqn.2014-08.org.nvmexpress:uuid:%pUb", &id); host = nvmf_host_alloc(nqn, &id); if (!host) return NULL; mutex_lock(&nvmf_hosts_mutex); list_add_tail(&host->list, &nvmf_hosts); mutex_unlock(&nvmf_hosts_mutex); return host; } static void nvmf_host_destroy(struct kref *ref) { struct nvmf_host *host = container_of(ref, struct nvmf_host, ref); mutex_lock(&nvmf_hosts_mutex); list_del(&host->list); mutex_unlock(&nvmf_hosts_mutex); kfree(host); } static void nvmf_host_put(struct nvmf_host *host) { if (host) kref_put(&host->ref, nvmf_host_destroy); } /** * nvmf_get_address() - Get address/port * @ctrl: Host NVMe controller instance which we got the address * @buf: OUTPUT parameter that will contain the address/port * @size: buffer size */ int nvmf_get_address(struct nvme_ctrl *ctrl, char *buf, int size) { int len = 0; if (ctrl->opts->mask & NVMF_OPT_TRADDR) len += scnprintf(buf, size, "traddr=%s", ctrl->opts->traddr); if (ctrl->opts->mask & NVMF_OPT_TRSVCID) len += scnprintf(buf + len, size - len, "%strsvcid=%s", (len) ? "," : "", ctrl->opts->trsvcid); if (ctrl->opts->mask & NVMF_OPT_HOST_TRADDR) len += scnprintf(buf + len, size - len, "%shost_traddr=%s", (len) ? "," : "", ctrl->opts->host_traddr); if (ctrl->opts->mask & NVMF_OPT_HOST_IFACE) len += scnprintf(buf + len, size - len, "%shost_iface=%s", (len) ? 
"," : "", ctrl->opts->host_iface); len += scnprintf(buf + len, size - len, "\n"); return len; } EXPORT_SYMBOL_GPL(nvmf_get_address); /** * nvmf_reg_read32() - NVMe Fabrics "Property Get" API function. * @ctrl: Host NVMe controller instance maintaining the admin * queue used to submit the property read command to * the allocated NVMe controller resource on the target system. * @off: Starting offset value of the targeted property * register (see the fabrics section of the NVMe standard). * @val: OUTPUT parameter that will contain the value of * the property after a successful read. * * Used by the host system to retrieve a 32-bit capsule property value * from an NVMe controller on the target system. * * ("Capsule property" is an "PCIe register concept" applied to the * NVMe fabrics space.) * * Return: * 0: successful read * > 0: NVMe error status code * < 0: Linux errno error code */ int nvmf_reg_read32(struct nvme_ctrl *ctrl, u32 off, u32 *val) { struct nvme_command cmd = { }; union nvme_result res; int ret; cmd.prop_get.opcode = nvme_fabrics_command; cmd.prop_get.fctype = nvme_fabrics_type_property_get; cmd.prop_get.offset = cpu_to_le32(off); ret = __nvme_submit_sync_cmd(ctrl->fabrics_q, &cmd, &res, NULL, 0, NVME_QID_ANY, 0, 0); if (ret >= 0) *val = le64_to_cpu(res.u64); if (unlikely(ret != 0)) dev_err(ctrl->device, "Property Get error: %d, offset %#x\n", ret > 0 ? ret & ~NVME_SC_DNR : ret, off); return ret; } EXPORT_SYMBOL_GPL(nvmf_reg_read32); /** * nvmf_reg_read64() - NVMe Fabrics "Property Get" API function. * @ctrl: Host NVMe controller instance maintaining the admin * queue used to submit the property read command to * the allocated controller resource on the target system. * @off: Starting offset value of the targeted property * register (see the fabrics section of the NVMe standard). * @val: OUTPUT parameter that will contain the value of * the property after a successful read. * * Used by the host system to retrieve a 64-bit capsule property value * from an NVMe controller on the target system. * * ("Capsule property" is an "PCIe register concept" applied to the * NVMe fabrics space.) * * Return: * 0: successful read * > 0: NVMe error status code * < 0: Linux errno error code */ int nvmf_reg_read64(struct nvme_ctrl *ctrl, u32 off, u64 *val) { struct nvme_command cmd = { }; union nvme_result res; int ret; cmd.prop_get.opcode = nvme_fabrics_command; cmd.prop_get.fctype = nvme_fabrics_type_property_get; cmd.prop_get.attrib = 1; cmd.prop_get.offset = cpu_to_le32(off); ret = __nvme_submit_sync_cmd(ctrl->fabrics_q, &cmd, &res, NULL, 0, NVME_QID_ANY, 0, 0); if (ret >= 0) *val = le64_to_cpu(res.u64); if (unlikely(ret != 0)) dev_err(ctrl->device, "Property Get error: %d, offset %#x\n", ret > 0 ? ret & ~NVME_SC_DNR : ret, off); return ret; } EXPORT_SYMBOL_GPL(nvmf_reg_read64); /** * nvmf_reg_write32() - NVMe Fabrics "Property Write" API function. * @ctrl: Host NVMe controller instance maintaining the admin * queue used to submit the property read command to * the allocated NVMe controller resource on the target system. * @off: Starting offset value of the targeted property * register (see the fabrics section of the NVMe standard). * @val: Input parameter that contains the value to be * written to the property. * * Used by the NVMe host system to write a 32-bit capsule property value * to an NVMe controller on the target system. * * ("Capsule property" is an "PCIe register concept" applied to the * NVMe fabrics space.) 
* * Return: * 0: successful write * > 0: NVMe error status code * < 0: Linux errno error code */ int nvmf_reg_write32(struct nvme_ctrl *ctrl, u32 off, u32 val) { struct nvme_command cmd = { }; int ret; cmd.prop_set.opcode = nvme_fabrics_command; cmd.prop_set.fctype = nvme_fabrics_type_property_set; cmd.prop_set.attrib = 0; cmd.prop_set.offset = cpu_to_le32(off); cmd.prop_set.value = cpu_to_le64(val); ret = __nvme_submit_sync_cmd(ctrl->fabrics_q, &cmd, NULL, NULL, 0, NVME_QID_ANY, 0, 0); if (unlikely(ret)) dev_err(ctrl->device, "Property Set error: %d, offset %#x\n", ret > 0 ? ret & ~NVME_SC_DNR : ret, off); return ret; } EXPORT_SYMBOL_GPL(nvmf_reg_write32); /** * nvmf_log_connect_error() - Error-parsing-diagnostic print out function for * connect() errors. * @ctrl: The specific /dev/nvmeX device that had the error. * @errval: Error code to be decoded in a more human-friendly * printout. * @offset: For use with the NVMe error code * NVME_SC_CONNECT_INVALID_PARAM. * @cmd: This is the SQE portion of a submission capsule. * @data: This is the "Data" portion of a submission capsule. */ static void nvmf_log_connect_error(struct nvme_ctrl *ctrl, int errval, int offset, struct nvme_command *cmd, struct nvmf_connect_data *data) { int err_sctype = errval & ~NVME_SC_DNR; if (errval < 0) { dev_err(ctrl->device, "Connect command failed, errno: %d\n", errval); return; } switch (err_sctype) { case NVME_SC_CONNECT_INVALID_PARAM: if (offset >> 16) { char *inv_data = "Connect Invalid Data Parameter"; switch (offset & 0xffff) { case (offsetof(struct nvmf_connect_data, cntlid)): dev_err(ctrl->device, "%s, cntlid: %d\n", inv_data, data->cntlid); break; case (offsetof(struct nvmf_connect_data, hostnqn)): dev_err(ctrl->device, "%s, hostnqn \"%s\"\n", inv_data, data->hostnqn); break; case (offsetof(struct nvmf_connect_data, subsysnqn)): dev_err(ctrl->device, "%s, subsysnqn \"%s\"\n", inv_data, data->subsysnqn); break; default: dev_err(ctrl->device, "%s, starting byte offset: %d\n", inv_data, offset & 0xffff); break; } } else { char *inv_sqe = "Connect Invalid SQE Parameter"; switch (offset) { case (offsetof(struct nvmf_connect_command, qid)): dev_err(ctrl->device, "%s, qid %d\n", inv_sqe, cmd->connect.qid); break; default: dev_err(ctrl->device, "%s, starting byte offset: %d\n", inv_sqe, offset); } } break; case NVME_SC_CONNECT_INVALID_HOST: dev_err(ctrl->device, "Connect for subsystem %s is not allowed, hostnqn: %s\n", data->subsysnqn, data->hostnqn); break; case NVME_SC_CONNECT_CTRL_BUSY: dev_err(ctrl->device, "Connect command failed: controller is busy or not available\n"); break; case NVME_SC_CONNECT_FORMAT: dev_err(ctrl->device, "Connect incompatible format: %d", cmd->connect.recfmt); break; case NVME_SC_HOST_PATH_ERROR: dev_err(ctrl->device, "Connect command failed: host path error\n"); break; case NVME_SC_AUTH_REQUIRED: dev_err(ctrl->device, "Connect command failed: authentication required\n"); break; default: dev_err(ctrl->device, "Connect command failed, error wo/DNR bit: %d\n", err_sctype); break; } } static struct nvmf_connect_data *nvmf_connect_data_prep(struct nvme_ctrl *ctrl, u16 cntlid) { struct nvmf_connect_data *data; data = kzalloc(sizeof(*data), GFP_KERNEL); if (!data) return NULL; uuid_copy(&data->hostid, &ctrl->opts->host->id); data->cntlid = cpu_to_le16(cntlid); strncpy(data->subsysnqn, ctrl->opts->subsysnqn, NVMF_NQN_SIZE); strncpy(data->hostnqn, ctrl->opts->host->nqn, NVMF_NQN_SIZE); return data; } static void nvmf_connect_cmd_prep(struct nvme_ctrl *ctrl, u16 qid, struct nvme_command *cmd) 
{ cmd->connect.opcode = nvme_fabrics_command; cmd->connect.fctype = nvme_fabrics_type_connect; cmd->connect.qid = cpu_to_le16(qid); if (qid) { cmd->connect.sqsize = cpu_to_le16(ctrl->sqsize); } else { cmd->connect.sqsize = cpu_to_le16(NVME_AQ_DEPTH - 1); /* * set keep-alive timeout in seconds granularity (ms * 1000) */ cmd->connect.kato = cpu_to_le32(ctrl->kato * 1000); } if (ctrl->opts->disable_sqflow) cmd->connect.cattr |= NVME_CONNECT_DISABLE_SQFLOW; } /** * nvmf_connect_admin_queue() - NVMe Fabrics Admin Queue "Connect" * API function. * @ctrl: Host nvme controller instance used to request * a new NVMe controller allocation on the target * system and establish an NVMe Admin connection to * that controller. * * This function enables an NVMe host device to request a new allocation of * an NVMe controller resource on a target system as well establish a * fabrics-protocol connection of the NVMe Admin queue between the * host system device and the allocated NVMe controller on the * target system via a NVMe Fabrics "Connect" command. * * Return: * 0: success * > 0: NVMe error status code * < 0: Linux errno error code * */ int nvmf_connect_admin_queue(struct nvme_ctrl *ctrl) { struct nvme_command cmd = { }; union nvme_result res; struct nvmf_connect_data *data; int ret; u32 result; nvmf_connect_cmd_prep(ctrl, 0, &cmd); data = nvmf_connect_data_prep(ctrl, 0xffff); if (!data) return -ENOMEM; ret = __nvme_submit_sync_cmd(ctrl->fabrics_q, &cmd, &res, data, sizeof(*data), NVME_QID_ANY, 1, BLK_MQ_REQ_RESERVED | BLK_MQ_REQ_NOWAIT); if (ret) { nvmf_log_connect_error(ctrl, ret, le32_to_cpu(res.u32), &cmd, data); goto out_free_data; } result = le32_to_cpu(res.u32); ctrl->cntlid = result & 0xFFFF; if (result & (NVME_CONNECT_AUTHREQ_ATR | NVME_CONNECT_AUTHREQ_ASCR)) { /* Secure concatenation is not implemented */ if (result & NVME_CONNECT_AUTHREQ_ASCR) { dev_warn(ctrl->device, "qid 0: secure concatenation is not supported\n"); ret = NVME_SC_AUTH_REQUIRED; goto out_free_data; } /* Authentication required */ ret = nvme_auth_negotiate(ctrl, 0); if (ret) { dev_warn(ctrl->device, "qid 0: authentication setup failed\n"); ret = NVME_SC_AUTH_REQUIRED; goto out_free_data; } ret = nvme_auth_wait(ctrl, 0); if (ret) dev_warn(ctrl->device, "qid 0: authentication failed\n"); else dev_info(ctrl->device, "qid 0: authenticated\n"); } out_free_data: kfree(data); return ret; } EXPORT_SYMBOL_GPL(nvmf_connect_admin_queue); /** * nvmf_connect_io_queue() - NVMe Fabrics I/O Queue "Connect" * API function. * @ctrl: Host nvme controller instance used to establish an * NVMe I/O queue connection to the already allocated NVMe * controller on the target system. * @qid: NVMe I/O queue number for the new I/O connection between * host and target (note qid == 0 is illegal as this is * the Admin queue, per NVMe standard). * * This function issues a fabrics-protocol connection * of a NVMe I/O queue (via NVMe Fabrics "Connect" command) * between the host system device and the allocated NVMe controller * on the target system. 
* * Return: * 0: success * > 0: NVMe error status code * < 0: Linux errno error code */ int nvmf_connect_io_queue(struct nvme_ctrl *ctrl, u16 qid) { struct nvme_command cmd = { }; struct nvmf_connect_data *data; union nvme_result res; int ret; u32 result; nvmf_connect_cmd_prep(ctrl, qid, &cmd); data = nvmf_connect_data_prep(ctrl, ctrl->cntlid); if (!data) return -ENOMEM; ret = __nvme_submit_sync_cmd(ctrl->connect_q, &cmd, &res, data, sizeof(*data), qid, 1, BLK_MQ_REQ_RESERVED | BLK_MQ_REQ_NOWAIT); if (ret) { nvmf_log_connect_error(ctrl, ret, le32_to_cpu(res.u32), &cmd, data); } result = le32_to_cpu(res.u32); if (result & (NVME_CONNECT_AUTHREQ_ATR | NVME_CONNECT_AUTHREQ_ASCR)) { /* Secure concatenation is not implemented */ if (result & NVME_CONNECT_AUTHREQ_ASCR) { dev_warn(ctrl->device, "qid 0: secure concatenation is not supported\n"); ret = NVME_SC_AUTH_REQUIRED; goto out_free_data; } /* Authentication required */ ret = nvme_auth_negotiate(ctrl, qid); if (ret) { dev_warn(ctrl->device, "qid %d: authentication setup failed\n", qid); ret = NVME_SC_AUTH_REQUIRED; } else { ret = nvme_auth_wait(ctrl, qid); if (ret) dev_warn(ctrl->device, "qid %u: authentication failed\n", qid); } } out_free_data: kfree(data); return ret; } EXPORT_SYMBOL_GPL(nvmf_connect_io_queue); bool nvmf_should_reconnect(struct nvme_ctrl *ctrl) { if (ctrl->opts->max_reconnects == -1 || ctrl->nr_reconnects < ctrl->opts->max_reconnects) return true; return false; } EXPORT_SYMBOL_GPL(nvmf_should_reconnect); /** * nvmf_register_transport() - NVMe Fabrics Library registration function. * @ops: Transport ops instance to be registered to the * common fabrics library. * * API function that registers the type of specific transport fabric * being implemented to the common NVMe fabrics library. Part of * the overall init sequence of starting up a fabrics driver. */ int nvmf_register_transport(struct nvmf_transport_ops *ops) { if (!ops->create_ctrl) return -EINVAL; down_write(&nvmf_transports_rwsem); list_add_tail(&ops->entry, &nvmf_transports); up_write(&nvmf_transports_rwsem); return 0; } EXPORT_SYMBOL_GPL(nvmf_register_transport); /** * nvmf_unregister_transport() - NVMe Fabrics Library unregistration function. * @ops: Transport ops instance to be unregistered from the * common fabrics library. * * Fabrics API function that unregisters the type of specific transport * fabric being implemented from the common NVMe fabrics library. * Part of the overall exit sequence of unloading the implemented driver. 
*/ void nvmf_unregister_transport(struct nvmf_transport_ops *ops) { down_write(&nvmf_transports_rwsem); list_del(&ops->entry); up_write(&nvmf_transports_rwsem); } EXPORT_SYMBOL_GPL(nvmf_unregister_transport); static struct nvmf_transport_ops *nvmf_lookup_transport( struct nvmf_ctrl_options *opts) { struct nvmf_transport_ops *ops; lockdep_assert_held(&nvmf_transports_rwsem); list_for_each_entry(ops, &nvmf_transports, entry) { if (strcmp(ops->name, opts->transport) == 0) return ops; } return NULL; } static const match_table_t opt_tokens = { { NVMF_OPT_TRANSPORT, "transport=%s" }, { NVMF_OPT_TRADDR, "traddr=%s" }, { NVMF_OPT_TRSVCID, "trsvcid=%s" }, { NVMF_OPT_NQN, "nqn=%s" }, { NVMF_OPT_QUEUE_SIZE, "queue_size=%d" }, { NVMF_OPT_NR_IO_QUEUES, "nr_io_queues=%d" }, { NVMF_OPT_RECONNECT_DELAY, "reconnect_delay=%d" }, { NVMF_OPT_CTRL_LOSS_TMO, "ctrl_loss_tmo=%d" }, { NVMF_OPT_KATO, "keep_alive_tmo=%d" }, { NVMF_OPT_HOSTNQN, "hostnqn=%s" }, { NVMF_OPT_HOST_TRADDR, "host_traddr=%s" }, { NVMF_OPT_HOST_IFACE, "host_iface=%s" }, { NVMF_OPT_HOST_ID, "hostid=%s" }, { NVMF_OPT_DUP_CONNECT, "duplicate_connect" }, { NVMF_OPT_DISABLE_SQFLOW, "disable_sqflow" }, { NVMF_OPT_HDR_DIGEST, "hdr_digest" }, { NVMF_OPT_DATA_DIGEST, "data_digest" }, { NVMF_OPT_NR_WRITE_QUEUES, "nr_write_queues=%d" }, { NVMF_OPT_NR_POLL_QUEUES, "nr_poll_queues=%d" }, { NVMF_OPT_TOS, "tos=%d" }, { NVMF_OPT_FAIL_FAST_TMO, "fast_io_fail_tmo=%d" }, { NVMF_OPT_DISCOVERY, "discovery" }, { NVMF_OPT_DHCHAP_SECRET, "dhchap_secret=%s" }, { NVMF_OPT_DHCHAP_CTRL_SECRET, "dhchap_ctrl_secret=%s" }, { NVMF_OPT_ERR, NULL } }; static int nvmf_parse_options(struct nvmf_ctrl_options *opts, const char *buf) { substring_t args[MAX_OPT_ARGS]; char *options, *o, *p; int token, ret = 0; size_t nqnlen = 0; int ctrl_loss_tmo = NVMF_DEF_CTRL_LOSS_TMO; uuid_t hostid; char hostnqn[NVMF_NQN_SIZE]; /* Set defaults */ opts->queue_size = NVMF_DEF_QUEUE_SIZE; opts->nr_io_queues = num_online_cpus(); opts->reconnect_delay = NVMF_DEF_RECONNECT_DELAY; opts->kato = 0; opts->duplicate_connect = false; opts->fast_io_fail_tmo = NVMF_DEF_FAIL_FAST_TMO; opts->hdr_digest = false; opts->data_digest = false; opts->tos = -1; /* < 0 == use transport default */ options = o = kstrdup(buf, GFP_KERNEL); if (!options) return -ENOMEM; /* use default host if not given by user space */ uuid_copy(&hostid, &nvmf_default_host->id); strscpy(hostnqn, nvmf_default_host->nqn, NVMF_NQN_SIZE); while ((p = strsep(&o, ",\n")) != NULL) { if (!*p) continue; token = match_token(p, opt_tokens, args); opts->mask |= token; switch (token) { case NVMF_OPT_TRANSPORT: p = match_strdup(args); if (!p) { ret = -ENOMEM; goto out; } kfree(opts->transport); opts->transport = p; break; case NVMF_OPT_NQN: p = match_strdup(args); if (!p) { ret = -ENOMEM; goto out; } kfree(opts->subsysnqn); opts->subsysnqn = p; nqnlen = strlen(opts->subsysnqn); if (nqnlen >= NVMF_NQN_SIZE) { pr_err("%s needs to be < %d bytes\n", opts->subsysnqn, NVMF_NQN_SIZE); ret = -EINVAL; goto out; } opts->discovery_nqn = !(strcmp(opts->subsysnqn, NVME_DISC_SUBSYS_NAME)); break; case NVMF_OPT_TRADDR: p = match_strdup(args); if (!p) { ret = -ENOMEM; goto out; } kfree(opts->traddr); opts->traddr = p; break; case NVMF_OPT_TRSVCID: p = match_strdup(args); if (!p) { ret = -ENOMEM; goto out; } kfree(opts->trsvcid); opts->trsvcid = p; break; case NVMF_OPT_QUEUE_SIZE: if (match_int(args, &token)) { ret = -EINVAL; goto out; } if (token < NVMF_MIN_QUEUE_SIZE || token > NVMF_MAX_QUEUE_SIZE) { pr_err("Invalid queue_size %d\n", token); ret = -EINVAL; goto out; } 
opts->queue_size = token; break; case NVMF_OPT_NR_IO_QUEUES: if (match_int(args, &token)) { ret = -EINVAL; goto out; } if (token <= 0) { pr_err("Invalid number of IOQs %d\n", token); ret = -EINVAL; goto out; } if (opts->discovery_nqn) { pr_debug("Ignoring nr_io_queues value for discovery controller\n"); break; } opts->nr_io_queues = min_t(unsigned int, num_online_cpus(), token); break; case NVMF_OPT_KATO: if (match_int(args, &token)) { ret = -EINVAL; goto out; } if (token < 0) { pr_err("Invalid keep_alive_tmo %d\n", token); ret = -EINVAL; goto out; } else if (token == 0 && !opts->discovery_nqn) { /* Allowed for debug */ pr_warn("keep_alive_tmo 0 won't execute keep alives!!!\n"); } opts->kato = token; break; case NVMF_OPT_CTRL_LOSS_TMO: if (match_int(args, &token)) { ret = -EINVAL; goto out; } if (token < 0) pr_warn("ctrl_loss_tmo < 0 will reconnect forever\n"); ctrl_loss_tmo = token; break; case NVMF_OPT_FAIL_FAST_TMO: if (match_int(args, &token)) { ret = -EINVAL; goto out; } if (token >= 0) pr_warn("I/O fail on reconnect controller after %d sec\n", token); else token = -1; opts->fast_io_fail_tmo = token; break; case NVMF_OPT_HOSTNQN: if (opts->host) { pr_err("hostnqn already user-assigned: %s\n", opts->host->nqn); ret = -EADDRINUSE; goto out; } p = match_strdup(args); if (!p) { ret = -ENOMEM; goto out; } nqnlen = strlen(p); if (nqnlen >= NVMF_NQN_SIZE) { pr_err("%s needs to be < %d bytes\n", p, NVMF_NQN_SIZE); kfree(p); ret = -EINVAL; goto out; } strscpy(hostnqn, p, NVMF_NQN_SIZE); kfree(p); break; case NVMF_OPT_RECONNECT_DELAY: if (match_int(args, &token)) { ret = -EINVAL; goto out; } if (token <= 0) { pr_err("Invalid reconnect_delay %d\n", token); ret = -EINVAL; goto out; } opts->reconnect_delay = token; break; case NVMF_OPT_HOST_TRADDR: p = match_strdup(args); if (!p) { ret = -ENOMEM; goto out; } kfree(opts->host_traddr); opts->host_traddr = p; break; case NVMF_OPT_HOST_IFACE: p = match_strdup(args); if (!p) { ret = -ENOMEM; goto out; } kfree(opts->host_iface); opts->host_iface = p; break; case NVMF_OPT_HOST_ID: p = match_strdup(args); if (!p) { ret = -ENOMEM; goto out; } ret = uuid_parse(p, &hostid); if (ret) { pr_err("Invalid hostid %s\n", p); ret = -EINVAL; kfree(p); goto out; } kfree(p); break; case NVMF_OPT_DUP_CONNECT: opts->duplicate_connect = true; break; case NVMF_OPT_DISABLE_SQFLOW: opts->disable_sqflow = true; break; case NVMF_OPT_HDR_DIGEST: opts->hdr_digest = true; break; case NVMF_OPT_DATA_DIGEST: opts->data_digest = true; break; case NVMF_OPT_NR_WRITE_QUEUES: if (match_int(args, &token)) { ret = -EINVAL; goto out; } if (token <= 0) { pr_err("Invalid nr_write_queues %d\n", token); ret = -EINVAL; goto out; } opts->nr_write_queues = token; break; case NVMF_OPT_NR_POLL_QUEUES: if (match_int(args, &token)) { ret = -EINVAL; goto out; } if (token <= 0) { pr_err("Invalid nr_poll_queues %d\n", token); ret = -EINVAL; goto out; } opts->nr_poll_queues = token; break; case NVMF_OPT_TOS: if (match_int(args, &token)) { ret = -EINVAL; goto out; } if (token < 0) { pr_err("Invalid type of service %d\n", token); ret = -EINVAL; goto out; } if (token > 255) { pr_warn("Clamping type of service to 255\n"); token = 255; } opts->tos = token; break; case NVMF_OPT_DISCOVERY: opts->discovery_nqn = true; break; case NVMF_OPT_DHCHAP_SECRET: p = match_strdup(args); if (!p) { ret = -ENOMEM; goto out; } if (strlen(p) < 11 || strncmp(p, "DHHC-1:", 7)) { pr_err("Invalid DH-CHAP secret %s\n", p); ret = -EINVAL; goto out; } kfree(opts->dhchap_secret); opts->dhchap_secret = p; break; case 
NVMF_OPT_DHCHAP_CTRL_SECRET: p = match_strdup(args); if (!p) { ret = -ENOMEM; goto out; } if (strlen(p) < 11 || strncmp(p, "DHHC-1:", 7)) { pr_err("Invalid DH-CHAP secret %s\n", p); ret = -EINVAL; goto out; } kfree(opts->dhchap_ctrl_secret); opts->dhchap_ctrl_secret = p; break; default: pr_warn("unknown parameter or missing value '%s' in ctrl creation request\n", p); ret = -EINVAL; goto out; } } if (opts->discovery_nqn) { opts->nr_io_queues = 0; opts->nr_write_queues = 0; opts->nr_poll_queues = 0; opts->duplicate_connect = true; } else { if (!opts->kato) opts->kato = NVME_DEFAULT_KATO; } if (ctrl_loss_tmo < 0) { opts->max_reconnects = -1; } else { opts->max_reconnects = DIV_ROUND_UP(ctrl_loss_tmo, opts->reconnect_delay); if (ctrl_loss_tmo < opts->fast_io_fail_tmo) pr_warn("failfast tmo (%d) larger than controller loss tmo (%d)\n", opts->fast_io_fail_tmo, ctrl_loss_tmo); } opts->host = nvmf_host_add(hostnqn, &hostid); if (IS_ERR(opts->host)) { ret = PTR_ERR(opts->host); opts->host = NULL; goto out; } out: kfree(options); return ret; } void nvmf_set_io_queues(struct nvmf_ctrl_options *opts, u32 nr_io_queues, u32 io_queues[HCTX_MAX_TYPES]) { if (opts->nr_write_queues && opts->nr_io_queues < nr_io_queues) { /* * separate read/write queues * hand out dedicated default queues only after we have * sufficient read queues. */ io_queues[HCTX_TYPE_READ] = opts->nr_io_queues; nr_io_queues -= io_queues[HCTX_TYPE_READ]; io_queues[HCTX_TYPE_DEFAULT] = min(opts->nr_write_queues, nr_io_queues); nr_io_queues -= io_queues[HCTX_TYPE_DEFAULT]; } else { /* * shared read/write queues * either no write queues were requested, or we don't have * sufficient queue count to have dedicated default queues. */ io_queues[HCTX_TYPE_DEFAULT] = min(opts->nr_io_queues, nr_io_queues); nr_io_queues -= io_queues[HCTX_TYPE_DEFAULT]; } if (opts->nr_poll_queues && nr_io_queues) { /* map dedicated poll queues only if we have queues left */ io_queues[HCTX_TYPE_POLL] = min(opts->nr_poll_queues, nr_io_queues); } } EXPORT_SYMBOL_GPL(nvmf_set_io_queues); void nvmf_map_queues(struct blk_mq_tag_set *set, struct nvme_ctrl *ctrl, u32 io_queues[HCTX_MAX_TYPES]) { struct nvmf_ctrl_options *opts = ctrl->opts; if (opts->nr_write_queues && io_queues[HCTX_TYPE_READ]) { /* separate read/write queues */ set->map[HCTX_TYPE_DEFAULT].nr_queues = io_queues[HCTX_TYPE_DEFAULT]; set->map[HCTX_TYPE_DEFAULT].queue_offset = 0; set->map[HCTX_TYPE_READ].nr_queues = io_queues[HCTX_TYPE_READ]; set->map[HCTX_TYPE_READ].queue_offset = io_queues[HCTX_TYPE_DEFAULT]; } else { /* shared read/write queues */ set->map[HCTX_TYPE_DEFAULT].nr_queues = io_queues[HCTX_TYPE_DEFAULT]; set->map[HCTX_TYPE_DEFAULT].queue_offset = 0; set->map[HCTX_TYPE_READ].nr_queues = io_queues[HCTX_TYPE_DEFAULT]; set->map[HCTX_TYPE_READ].queue_offset = 0; } blk_mq_map_queues(&set->map[HCTX_TYPE_DEFAULT]); blk_mq_map_queues(&set->map[HCTX_TYPE_READ]); if (opts->nr_poll_queues && io_queues[HCTX_TYPE_POLL]) { /* map dedicated poll queues only if we have queues left */ set->map[HCTX_TYPE_POLL].nr_queues = io_queues[HCTX_TYPE_POLL]; set->map[HCTX_TYPE_POLL].queue_offset = io_queues[HCTX_TYPE_DEFAULT] + io_queues[HCTX_TYPE_READ]; blk_mq_map_queues(&set->map[HCTX_TYPE_POLL]); } dev_info(ctrl->device, "mapped %d/%d/%d default/read/poll queues.\n", io_queues[HCTX_TYPE_DEFAULT], io_queues[HCTX_TYPE_READ], io_queues[HCTX_TYPE_POLL]); } EXPORT_SYMBOL_GPL(nvmf_map_queues); static int nvmf_check_required_opts(struct nvmf_ctrl_options *opts, unsigned int required_opts) { if ((opts->mask & required_opts) != 
required_opts) { unsigned int i; for (i = 0; i < ARRAY_SIZE(opt_tokens); i++) { if ((opt_tokens[i].token & required_opts) && !(opt_tokens[i].token & opts->mask)) { pr_warn("missing parameter '%s'\n", opt_tokens[i].pattern); } } return -EINVAL; } return 0; } bool nvmf_ip_options_match(struct nvme_ctrl *ctrl, struct nvmf_ctrl_options *opts) { if (!nvmf_ctlr_matches_baseopts(ctrl, opts) || strcmp(opts->traddr, ctrl->opts->traddr) || strcmp(opts->trsvcid, ctrl->opts->trsvcid)) return false; /* * Checking the local address or host interfaces is rough. * * In most cases, none is specified and the host port or * host interface is selected by the stack. * * Assume no match if: * - local address or host interface is specified and address * or host interface is not the same * - local address or host interface is not specified but * remote is, or vice versa (admin using specific * host_traddr/host_iface when it matters). */ if ((opts->mask & NVMF_OPT_HOST_TRADDR) && (ctrl->opts->mask & NVMF_OPT_HOST_TRADDR)) { if (strcmp(opts->host_traddr, ctrl->opts->host_traddr)) return false; } else if ((opts->mask & NVMF_OPT_HOST_TRADDR) || (ctrl->opts->mask & NVMF_OPT_HOST_TRADDR)) { return false; } if ((opts->mask & NVMF_OPT_HOST_IFACE) && (ctrl->opts->mask & NVMF_OPT_HOST_IFACE)) { if (strcmp(opts->host_iface, ctrl->opts->host_iface)) return false; } else if ((opts->mask & NVMF_OPT_HOST_IFACE) || (ctrl->opts->mask & NVMF_OPT_HOST_IFACE)) { return false; } return true; } EXPORT_SYMBOL_GPL(nvmf_ip_options_match); static int nvmf_check_allowed_opts(struct nvmf_ctrl_options *opts, unsigned int allowed_opts) { if (opts->mask & ~allowed_opts) { unsigned int i; for (i = 0; i < ARRAY_SIZE(opt_tokens); i++) { if ((opt_tokens[i].token & opts->mask) && (opt_tokens[i].token & ~allowed_opts)) { pr_warn("invalid parameter '%s'\n", opt_tokens[i].pattern); } } return -EINVAL; } return 0; } void nvmf_free_options(struct nvmf_ctrl_options *opts) { nvmf_host_put(opts->host); kfree(opts->transport); kfree(opts->traddr); kfree(opts->trsvcid); kfree(opts->subsysnqn); kfree(opts->host_traddr); kfree(opts->host_iface); kfree(opts->dhchap_secret); kfree(opts->dhchap_ctrl_secret); kfree(opts); } EXPORT_SYMBOL_GPL(nvmf_free_options); #define NVMF_REQUIRED_OPTS (NVMF_OPT_TRANSPORT | NVMF_OPT_NQN) #define NVMF_ALLOWED_OPTS (NVMF_OPT_QUEUE_SIZE | NVMF_OPT_NR_IO_QUEUES | \ NVMF_OPT_KATO | NVMF_OPT_HOSTNQN | \ NVMF_OPT_HOST_ID | NVMF_OPT_DUP_CONNECT |\ NVMF_OPT_DISABLE_SQFLOW | NVMF_OPT_DISCOVERY |\ NVMF_OPT_FAIL_FAST_TMO | NVMF_OPT_DHCHAP_SECRET |\ NVMF_OPT_DHCHAP_CTRL_SECRET) static struct nvme_ctrl * nvmf_create_ctrl(struct device *dev, const char *buf) { struct nvmf_ctrl_options *opts; struct nvmf_transport_ops *ops; struct nvme_ctrl *ctrl; int ret; opts = kzalloc(sizeof(*opts), GFP_KERNEL); if (!opts) return ERR_PTR(-ENOMEM); ret = nvmf_parse_options(opts, buf); if (ret) goto out_free_opts; request_module("nvme-%s", opts->transport); /* * Check the generic options first as we need a valid transport for * the lookup below. Then clear the generic flags so that transport * drivers don't have to care about them. 
*/ ret = nvmf_check_required_opts(opts, NVMF_REQUIRED_OPTS); if (ret) goto out_free_opts; opts->mask &= ~NVMF_REQUIRED_OPTS; down_read(&nvmf_transports_rwsem); ops = nvmf_lookup_transport(opts); if (!ops) { pr_info("no handler found for transport %s.\n", opts->transport); ret = -EINVAL; goto out_unlock; } if (!try_module_get(ops->module)) { ret = -EBUSY; goto out_unlock; } up_read(&nvmf_transports_rwsem); ret = nvmf_check_required_opts(opts, ops->required_opts); if (ret) goto out_module_put; ret = nvmf_check_allowed_opts(opts, NVMF_ALLOWED_OPTS | ops->allowed_opts | ops->required_opts); if (ret) goto out_module_put; ctrl = ops->create_ctrl(dev, opts); if (IS_ERR(ctrl)) { ret = PTR_ERR(ctrl); goto out_module_put; } module_put(ops->module); return ctrl; out_module_put: module_put(ops->module); goto out_free_opts; out_unlock: up_read(&nvmf_transports_rwsem); out_free_opts: nvmf_free_options(opts); return ERR_PTR(ret); } static struct class *nvmf_class; static struct device *nvmf_device; static DEFINE_MUTEX(nvmf_dev_mutex); static ssize_t nvmf_dev_write(struct file *file, const char __user *ubuf, size_t count, loff_t *pos) { struct seq_file *seq_file = file->private_data; struct nvme_ctrl *ctrl; const char *buf; int ret = 0; if (count > PAGE_SIZE) return -ENOMEM; buf = memdup_user_nul(ubuf, count); if (IS_ERR(buf)) return PTR_ERR(buf); mutex_lock(&nvmf_dev_mutex); if (seq_file->private) { ret = -EINVAL; goto out_unlock; } ctrl = nvmf_create_ctrl(nvmf_device, buf); if (IS_ERR(ctrl)) { ret = PTR_ERR(ctrl); goto out_unlock; } seq_file->private = ctrl; out_unlock: mutex_unlock(&nvmf_dev_mutex); kfree(buf); return ret ? ret : count; } static void __nvmf_concat_opt_tokens(struct seq_file *seq_file) { const struct match_token *tok; int idx; /* * Add dummy entries for instance and cntlid to * signal an invalid/non-existing controller */ seq_puts(seq_file, "instance=-1,cntlid=-1"); for (idx = 0; idx < ARRAY_SIZE(opt_tokens); idx++) { tok = &opt_tokens[idx]; if (tok->token == NVMF_OPT_ERR) continue; seq_puts(seq_file, ","); seq_puts(seq_file, tok->pattern); } seq_puts(seq_file, "\n"); } static int nvmf_dev_show(struct seq_file *seq_file, void *private) { struct nvme_ctrl *ctrl; mutex_lock(&nvmf_dev_mutex); ctrl = seq_file->private; if (!ctrl) { __nvmf_concat_opt_tokens(seq_file); goto out_unlock; } seq_printf(seq_file, "instance=%d,cntlid=%d\n", ctrl->instance, ctrl->cntlid); out_unlock: mutex_unlock(&nvmf_dev_mutex); return 0; } static int nvmf_dev_open(struct inode *inode, struct file *file) { /* * The miscdevice code initializes file->private_data, but doesn't * make use of it later. 
*/ file->private_data = NULL; return single_open(file, nvmf_dev_show, NULL); } static int nvmf_dev_release(struct inode *inode, struct file *file) { struct seq_file *seq_file = file->private_data; struct nvme_ctrl *ctrl = seq_file->private; if (ctrl) nvme_put_ctrl(ctrl); return single_release(inode, file); } static const struct file_operations nvmf_dev_fops = { .owner = THIS_MODULE, .write = nvmf_dev_write, .read = seq_read, .open = nvmf_dev_open, .release = nvmf_dev_release, }; static struct miscdevice nvmf_misc = { .minor = MISC_DYNAMIC_MINOR, .name = "nvme-fabrics", .fops = &nvmf_dev_fops, }; static int __init nvmf_init(void) { int ret; nvmf_default_host = nvmf_host_default(); if (!nvmf_default_host) return -ENOMEM; nvmf_class = class_create("nvme-fabrics"); if (IS_ERR(nvmf_class)) { pr_err("couldn't register class nvme-fabrics\n"); ret = PTR_ERR(nvmf_class); goto out_free_host; } nvmf_device = device_create(nvmf_class, NULL, MKDEV(0, 0), NULL, "ctl"); if (IS_ERR(nvmf_device)) { pr_err("couldn't create nvme-fabrics device!\n"); ret = PTR_ERR(nvmf_device); goto out_destroy_class; } ret = misc_register(&nvmf_misc); if (ret) { pr_err("couldn't register misc device: %d\n", ret); goto out_destroy_device; } return 0; out_destroy_device: device_destroy(nvmf_class, MKDEV(0, 0)); out_destroy_class: class_destroy(nvmf_class); out_free_host: nvmf_host_put(nvmf_default_host); return ret; } static void __exit nvmf_exit(void) { misc_deregister(&nvmf_misc); device_destroy(nvmf_class, MKDEV(0, 0)); class_destroy(nvmf_class); nvmf_host_put(nvmf_default_host); BUILD_BUG_ON(sizeof(struct nvmf_common_command) != 64); BUILD_BUG_ON(sizeof(struct nvmf_connect_command) != 64); BUILD_BUG_ON(sizeof(struct nvmf_property_get_command) != 64); BUILD_BUG_ON(sizeof(struct nvmf_property_set_command) != 64); BUILD_BUG_ON(sizeof(struct nvmf_auth_send_command) != 64); BUILD_BUG_ON(sizeof(struct nvmf_auth_receive_command) != 64); BUILD_BUG_ON(sizeof(struct nvmf_connect_data) != 1024); BUILD_BUG_ON(sizeof(struct nvmf_auth_dhchap_negotiate_data) != 8); BUILD_BUG_ON(sizeof(struct nvmf_auth_dhchap_challenge_data) != 16); BUILD_BUG_ON(sizeof(struct nvmf_auth_dhchap_reply_data) != 16); BUILD_BUG_ON(sizeof(struct nvmf_auth_dhchap_success1_data) != 16); BUILD_BUG_ON(sizeof(struct nvmf_auth_dhchap_success2_data) != 16); } MODULE_LICENSE("GPL v2"); module_init(nvmf_init); module_exit(nvmf_exit);
linux-master
drivers/nvme/host/fabrics.c
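/*
 * Illustrative sketch (not part of fabrics.c above): the comma-separated
 * key=value grammar handled by nvmf_parse_options() is what user space
 * writes to /dev/nvme-fabrics to create a controller, and reading the same
 * file descriptor back returns "instance=<n>,cntlid=<n>". The transport,
 * addresses and NQNs below are placeholder values; a real host would
 * normally go through nvme-cli instead of open-coding this.
 */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	const char *opts =
		"transport=tcp,traddr=192.168.0.10,trsvcid=4420,"
		"nqn=nqn.2014-08.org.nvmexpress.discovery,"
		"keep_alive_tmo=30";
	char reply[256];
	ssize_t n;
	int fd;

	fd = open("/dev/nvme-fabrics", O_RDWR);
	if (fd < 0) {
		perror("open /dev/nvme-fabrics");
		return 1;
	}
	/* One write creates one controller; the kernel parses the string. */
	if (write(fd, opts, strlen(opts)) < 0) {
		perror("write");
		close(fd);
		return 1;
	}
	/* nvmf_dev_show() reports the new controller's instance and cntlid. */
	n = read(fd, reply, sizeof(reply) - 1);
	if (n > 0)
		printf("%.*s", (int)n, reply);
	close(fd);
	return 0;
}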
// SPDX-License-Identifier: GPL-2.0 /* * Copyright (c) 2015 Intel Corporation * Keith Busch <[email protected]> */ #include <linux/blkdev.h> #include <linux/pr.h> #include <asm/unaligned.h> #include "nvme.h" static enum nvme_pr_type nvme_pr_type_from_blk(enum pr_type type) { switch (type) { case PR_WRITE_EXCLUSIVE: return NVME_PR_WRITE_EXCLUSIVE; case PR_EXCLUSIVE_ACCESS: return NVME_PR_EXCLUSIVE_ACCESS; case PR_WRITE_EXCLUSIVE_REG_ONLY: return NVME_PR_WRITE_EXCLUSIVE_REG_ONLY; case PR_EXCLUSIVE_ACCESS_REG_ONLY: return NVME_PR_EXCLUSIVE_ACCESS_REG_ONLY; case PR_WRITE_EXCLUSIVE_ALL_REGS: return NVME_PR_WRITE_EXCLUSIVE_ALL_REGS; case PR_EXCLUSIVE_ACCESS_ALL_REGS: return NVME_PR_EXCLUSIVE_ACCESS_ALL_REGS; } return 0; } static enum pr_type block_pr_type_from_nvme(enum nvme_pr_type type) { switch (type) { case NVME_PR_WRITE_EXCLUSIVE: return PR_WRITE_EXCLUSIVE; case NVME_PR_EXCLUSIVE_ACCESS: return PR_EXCLUSIVE_ACCESS; case NVME_PR_WRITE_EXCLUSIVE_REG_ONLY: return PR_WRITE_EXCLUSIVE_REG_ONLY; case NVME_PR_EXCLUSIVE_ACCESS_REG_ONLY: return PR_EXCLUSIVE_ACCESS_REG_ONLY; case NVME_PR_WRITE_EXCLUSIVE_ALL_REGS: return PR_WRITE_EXCLUSIVE_ALL_REGS; case NVME_PR_EXCLUSIVE_ACCESS_ALL_REGS: return PR_EXCLUSIVE_ACCESS_ALL_REGS; } return 0; } static int nvme_send_ns_head_pr_command(struct block_device *bdev, struct nvme_command *c, void *data, unsigned int data_len) { struct nvme_ns_head *head = bdev->bd_disk->private_data; int srcu_idx = srcu_read_lock(&head->srcu); struct nvme_ns *ns = nvme_find_path(head); int ret = -EWOULDBLOCK; if (ns) { c->common.nsid = cpu_to_le32(ns->head->ns_id); ret = nvme_submit_sync_cmd(ns->queue, c, data, data_len); } srcu_read_unlock(&head->srcu, srcu_idx); return ret; } static int nvme_send_ns_pr_command(struct nvme_ns *ns, struct nvme_command *c, void *data, unsigned int data_len) { c->common.nsid = cpu_to_le32(ns->head->ns_id); return nvme_submit_sync_cmd(ns->queue, c, data, data_len); } static int nvme_sc_to_pr_err(int nvme_sc) { if (nvme_is_path_error(nvme_sc)) return PR_STS_PATH_FAILED; switch (nvme_sc) { case NVME_SC_SUCCESS: return PR_STS_SUCCESS; case NVME_SC_RESERVATION_CONFLICT: return PR_STS_RESERVATION_CONFLICT; case NVME_SC_ONCS_NOT_SUPPORTED: return -EOPNOTSUPP; case NVME_SC_BAD_ATTRIBUTES: case NVME_SC_INVALID_OPCODE: case NVME_SC_INVALID_FIELD: case NVME_SC_INVALID_NS: return -EINVAL; default: return PR_STS_IOERR; } } static int nvme_send_pr_command(struct block_device *bdev, struct nvme_command *c, void *data, unsigned int data_len) { if (IS_ENABLED(CONFIG_NVME_MULTIPATH) && bdev->bd_disk->fops == &nvme_ns_head_ops) return nvme_send_ns_head_pr_command(bdev, c, data, data_len); return nvme_send_ns_pr_command(bdev->bd_disk->private_data, c, data, data_len); } static int nvme_pr_command(struct block_device *bdev, u32 cdw10, u64 key, u64 sa_key, u8 op) { struct nvme_command c = { }; u8 data[16] = { 0, }; int ret; put_unaligned_le64(key, &data[0]); put_unaligned_le64(sa_key, &data[8]); c.common.opcode = op; c.common.cdw10 = cpu_to_le32(cdw10); ret = nvme_send_pr_command(bdev, &c, data, sizeof(data)); if (ret < 0) return ret; return nvme_sc_to_pr_err(ret); } static int nvme_pr_register(struct block_device *bdev, u64 old, u64 new, unsigned flags) { u32 cdw10; if (flags & ~PR_FL_IGNORE_KEY) return -EOPNOTSUPP; cdw10 = old ? 2 : 0; cdw10 |= (flags & PR_FL_IGNORE_KEY) ? 
1 << 3 : 0; cdw10 |= (1 << 30) | (1 << 31); /* PTPL=1 */ return nvme_pr_command(bdev, cdw10, old, new, nvme_cmd_resv_register); } static int nvme_pr_reserve(struct block_device *bdev, u64 key, enum pr_type type, unsigned flags) { u32 cdw10; if (flags & ~PR_FL_IGNORE_KEY) return -EOPNOTSUPP; cdw10 = nvme_pr_type_from_blk(type) << 8; cdw10 |= ((flags & PR_FL_IGNORE_KEY) ? 1 << 3 : 0); return nvme_pr_command(bdev, cdw10, key, 0, nvme_cmd_resv_acquire); } static int nvme_pr_preempt(struct block_device *bdev, u64 old, u64 new, enum pr_type type, bool abort) { u32 cdw10 = nvme_pr_type_from_blk(type) << 8 | (abort ? 2 : 1); return nvme_pr_command(bdev, cdw10, old, new, nvme_cmd_resv_acquire); } static int nvme_pr_clear(struct block_device *bdev, u64 key) { u32 cdw10 = 1 | (key ? 0 : 1 << 3); return nvme_pr_command(bdev, cdw10, key, 0, nvme_cmd_resv_release); } static int nvme_pr_release(struct block_device *bdev, u64 key, enum pr_type type) { u32 cdw10 = nvme_pr_type_from_blk(type) << 8 | (key ? 0 : 1 << 3); return nvme_pr_command(bdev, cdw10, key, 0, nvme_cmd_resv_release); } static int nvme_pr_resv_report(struct block_device *bdev, void *data, u32 data_len, bool *eds) { struct nvme_command c = { }; int ret; c.common.opcode = nvme_cmd_resv_report; c.common.cdw10 = cpu_to_le32(nvme_bytes_to_numd(data_len)); c.common.cdw11 = cpu_to_le32(NVME_EXTENDED_DATA_STRUCT); *eds = true; retry: ret = nvme_send_pr_command(bdev, &c, data, data_len); if (ret == NVME_SC_HOST_ID_INCONSIST && c.common.cdw11 == cpu_to_le32(NVME_EXTENDED_DATA_STRUCT)) { c.common.cdw11 = 0; *eds = false; goto retry; } if (ret < 0) return ret; return nvme_sc_to_pr_err(ret); } static int nvme_pr_read_keys(struct block_device *bdev, struct pr_keys *keys_info) { u32 rse_len, num_keys = keys_info->num_keys; struct nvme_reservation_status_ext *rse; int ret, i; bool eds; /* * Assume we are using 128-bit host IDs and allocate a buffer large * enough to get enough keys to fill the return keys buffer. */ rse_len = struct_size(rse, regctl_eds, num_keys); rse = kzalloc(rse_len, GFP_KERNEL); if (!rse) return -ENOMEM; ret = nvme_pr_resv_report(bdev, rse, rse_len, &eds); if (ret) goto free_rse; keys_info->generation = le32_to_cpu(rse->gen); keys_info->num_keys = get_unaligned_le16(&rse->regctl); num_keys = min(num_keys, keys_info->num_keys); for (i = 0; i < num_keys; i++) { if (eds) { keys_info->keys[i] = le64_to_cpu(rse->regctl_eds[i].rkey); } else { struct nvme_reservation_status *rs; rs = (struct nvme_reservation_status *)rse; keys_info->keys[i] = le64_to_cpu(rs->regctl_ds[i].rkey); } } free_rse: kfree(rse); return ret; } static int nvme_pr_read_reservation(struct block_device *bdev, struct pr_held_reservation *resv) { struct nvme_reservation_status_ext tmp_rse, *rse; int ret, i, num_regs; u32 rse_len; bool eds; get_num_regs: /* * Get the number of registrations so we know how big to allocate * the response buffer. 
*/ ret = nvme_pr_resv_report(bdev, &tmp_rse, sizeof(tmp_rse), &eds); if (ret) return ret; num_regs = get_unaligned_le16(&tmp_rse.regctl); if (!num_regs) { resv->generation = le32_to_cpu(tmp_rse.gen); return 0; } rse_len = struct_size(rse, regctl_eds, num_regs); rse = kzalloc(rse_len, GFP_KERNEL); if (!rse) return -ENOMEM; ret = nvme_pr_resv_report(bdev, rse, rse_len, &eds); if (ret) goto free_rse; if (num_regs != get_unaligned_le16(&rse->regctl)) { kfree(rse); goto get_num_regs; } resv->generation = le32_to_cpu(rse->gen); resv->type = block_pr_type_from_nvme(rse->rtype); for (i = 0; i < num_regs; i++) { if (eds) { if (rse->regctl_eds[i].rcsts) { resv->key = le64_to_cpu(rse->regctl_eds[i].rkey); break; } } else { struct nvme_reservation_status *rs; rs = (struct nvme_reservation_status *)rse; if (rs->regctl_ds[i].rcsts) { resv->key = le64_to_cpu(rs->regctl_ds[i].rkey); break; } } } free_rse: kfree(rse); return ret; } const struct pr_ops nvme_pr_ops = { .pr_register = nvme_pr_register, .pr_reserve = nvme_pr_reserve, .pr_release = nvme_pr_release, .pr_preempt = nvme_pr_preempt, .pr_clear = nvme_pr_clear, .pr_read_keys = nvme_pr_read_keys, .pr_read_reservation = nvme_pr_read_reservation, };
linux-master
drivers/nvme/host/pr.c
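/*
 * Illustrative sketch (not part of pr.c above): nvme_pr_ops backs the generic
 * block-layer persistent reservation ioctls from <linux/pr.h>, so a register
 * followed by a reserve from user space ends up in nvme_pr_register() and
 * nvme_pr_reserve(). The device path and reservation key are placeholders and
 * the namespace must actually support reservations; a nonzero ioctl return is
 * either -1/errno or a positive PR_STS_* code.
 */
#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/pr.h>

int main(void)
{
	struct pr_registration reg = {
		.old_key = 0,
		.new_key = 0xABCD1234,
		.flags = 0,
	};
	struct pr_reservation rsv = {
		.key = 0xABCD1234,
		.type = PR_WRITE_EXCLUSIVE,
	};
	int fd = open("/dev/nvme0n1", O_RDWR);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	/* Register our key (Reservation Register on the NVMe side). */
	if (ioctl(fd, IOC_PR_REGISTER, &reg))
		perror("IOC_PR_REGISTER");
	/* Then acquire a write-exclusive reservation (Reservation Acquire). */
	else if (ioctl(fd, IOC_PR_RESERVE, &rsv))
		perror("IOC_PR_RESERVE");
	close(fd);
	return 0;
}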
// SPDX-License-Identifier: GPL-2.0
/*
 * NVM Express target device driver tracepoints
 * Copyright (c) 2018 Johannes Thumshirn, SUSE Linux GmbH
 */

#include <asm/unaligned.h>
#include "trace.h"

static const char *nvmet_trace_admin_identify(struct trace_seq *p, u8 *cdw10)
{
	const char *ret = trace_seq_buffer_ptr(p);
	u8 cns = cdw10[0];
	u16 ctrlid = get_unaligned_le16(cdw10 + 2);

	trace_seq_printf(p, "cns=%u, ctrlid=%u", cns, ctrlid);
	trace_seq_putc(p, 0);

	return ret;
}

static const char *nvmet_trace_admin_get_features(struct trace_seq *p,
						  u8 *cdw10)
{
	const char *ret = trace_seq_buffer_ptr(p);
	u8 fid = cdw10[0];
	u8 sel = cdw10[1] & 0x7;
	u32 cdw11 = get_unaligned_le32(cdw10 + 4);

	trace_seq_printf(p, "fid=0x%x, sel=0x%x, cdw11=0x%x", fid, sel, cdw11);
	trace_seq_putc(p, 0);

	return ret;
}

static const char *nvmet_trace_get_lba_status(struct trace_seq *p,
					      u8 *cdw10)
{
	const char *ret = trace_seq_buffer_ptr(p);
	u64 slba = get_unaligned_le64(cdw10);
	u32 mndw = get_unaligned_le32(cdw10 + 8);
	u16 rl = get_unaligned_le16(cdw10 + 12);
	u8 atype = cdw10[15];

	trace_seq_printf(p, "slba=0x%llx, mndw=0x%x, rl=0x%x, atype=%u",
			 slba, mndw, rl, atype);
	trace_seq_putc(p, 0);

	return ret;
}

static const char *nvmet_trace_admin_set_features(struct trace_seq *p,
						  u8 *cdw10)
{
	const char *ret = trace_seq_buffer_ptr(p);
	u8 fid = cdw10[0];
	u8 sv = cdw10[3] & 0x8;
	u32 cdw11 = get_unaligned_le32(cdw10 + 4);

	trace_seq_printf(p, "fid=0x%x, sv=0x%x, cdw11=0x%x", fid, sv, cdw11);
	trace_seq_putc(p, 0);

	return ret;
}

static const char *nvmet_trace_read_write(struct trace_seq *p, u8 *cdw10)
{
	const char *ret = trace_seq_buffer_ptr(p);
	u64 slba = get_unaligned_le64(cdw10);
	u16 length = get_unaligned_le16(cdw10 + 8);
	u16 control = get_unaligned_le16(cdw10 + 10);
	u32 dsmgmt = get_unaligned_le32(cdw10 + 12);
	u32 reftag = get_unaligned_le32(cdw10 + 16);

	trace_seq_printf(p,
			 "slba=%llu, len=%u, ctrl=0x%x, dsmgmt=%u, reftag=%u",
			 slba, length, control, dsmgmt, reftag);
	trace_seq_putc(p, 0);

	return ret;
}

static const char *nvmet_trace_dsm(struct trace_seq *p, u8 *cdw10)
{
	const char *ret = trace_seq_buffer_ptr(p);

	trace_seq_printf(p, "nr=%u, attributes=%u",
			 get_unaligned_le32(cdw10),
			 get_unaligned_le32(cdw10 + 4));
	trace_seq_putc(p, 0);

	return ret;
}

static const char *nvmet_trace_common(struct trace_seq *p, u8 *cdw10)
{
	const char *ret = trace_seq_buffer_ptr(p);

	trace_seq_printf(p, "cdw10=%*ph", 24, cdw10);
	trace_seq_putc(p, 0);

	return ret;
}

const char *nvmet_trace_parse_admin_cmd(struct trace_seq *p,
					u8 opcode, u8 *cdw10)
{
	switch (opcode) {
	case nvme_admin_identify:
		return nvmet_trace_admin_identify(p, cdw10);
	case nvme_admin_set_features:
		return nvmet_trace_admin_set_features(p, cdw10);
	case nvme_admin_get_features:
		return nvmet_trace_admin_get_features(p, cdw10);
	case nvme_admin_get_lba_status:
		return nvmet_trace_get_lba_status(p, cdw10);
	default:
		return nvmet_trace_common(p, cdw10);
	}
}

const char *nvmet_trace_parse_nvm_cmd(struct trace_seq *p,
				      u8 opcode, u8 *cdw10)
{
	switch (opcode) {
	case nvme_cmd_read:
	case nvme_cmd_write:
	case nvme_cmd_write_zeroes:
		return nvmet_trace_read_write(p, cdw10);
	case nvme_cmd_dsm:
		return nvmet_trace_dsm(p, cdw10);
	default:
		return nvmet_trace_common(p, cdw10);
	}
}

static const char *nvmet_trace_fabrics_property_set(struct trace_seq *p,
						    u8 *spc)
{
	const char *ret = trace_seq_buffer_ptr(p);
	u8 attrib = spc[0];
	u32 ofst = get_unaligned_le32(spc + 4);
	u64 value = get_unaligned_le64(spc + 8);

	trace_seq_printf(p, "attrib=%u, ofst=0x%x, value=0x%llx",
			 attrib, ofst, value);
	trace_seq_putc(p, 0);
	return ret;
}

static const char *nvmet_trace_fabrics_connect(struct trace_seq *p,
					       u8 *spc)
{
	const char *ret = trace_seq_buffer_ptr(p);
	u16 recfmt = get_unaligned_le16(spc);
	u16 qid = get_unaligned_le16(spc + 2);
	u16 sqsize = get_unaligned_le16(spc + 4);
	u8 cattr = spc[6];
	u32 kato = get_unaligned_le32(spc + 8);

	trace_seq_printf(p, "recfmt=%u, qid=%u, sqsize=%u, cattr=%u, kato=%u",
			 recfmt, qid, sqsize, cattr, kato);
	trace_seq_putc(p, 0);
	return ret;
}

static const char *nvmet_trace_fabrics_property_get(struct trace_seq *p,
						    u8 *spc)
{
	const char *ret = trace_seq_buffer_ptr(p);
	u8 attrib = spc[0];
	u32 ofst = get_unaligned_le32(spc + 4);

	trace_seq_printf(p, "attrib=%u, ofst=0x%x", attrib, ofst);
	trace_seq_putc(p, 0);
	return ret;
}

static const char *nvmet_trace_fabrics_common(struct trace_seq *p, u8 *spc)
{
	const char *ret = trace_seq_buffer_ptr(p);

	trace_seq_printf(p, "specific=%*ph", 24, spc);
	trace_seq_putc(p, 0);
	return ret;
}

const char *nvmet_trace_parse_fabrics_cmd(struct trace_seq *p,
					  u8 fctype, u8 *spc)
{
	switch (fctype) {
	case nvme_fabrics_type_property_set:
		return nvmet_trace_fabrics_property_set(p, spc);
	case nvme_fabrics_type_connect:
		return nvmet_trace_fabrics_connect(p, spc);
	case nvme_fabrics_type_property_get:
		return nvmet_trace_fabrics_property_get(p, spc);
	default:
		return nvmet_trace_fabrics_common(p, spc);
	}
}

const char *nvmet_trace_disk_name(struct trace_seq *p, char *name)
{
	const char *ret = trace_seq_buffer_ptr(p);

	if (*name)
		trace_seq_printf(p, "disk=%s, ", name);
	trace_seq_putc(p, 0);

	return ret;
}

const char *nvmet_trace_ctrl_name(struct trace_seq *p, struct nvmet_ctrl *ctrl)
{
	const char *ret = trace_seq_buffer_ptr(p);

	/*
	 * XXX: We don't know the controller instance before executing the
	 * connect command itself because the connect command for the admin
	 * queue will not provide the cntlid which will be allocated in this
	 * command.  In case of io queues, the controller instance will be
	 * mapped by the extra data of the connect command.
	 * If we can know the extra data of the connect command in this stage,
	 * we can update this print statement later.
	 */
	if (ctrl)
		trace_seq_printf(p, "%d", ctrl->cntlid);
	else
		trace_seq_printf(p, "_");
	trace_seq_putc(p, 0);

	return ret;
}
linux-master
drivers/nvme/target/trace.c
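/*
 * Illustrative sketch (not part of trace.c above): the decoders in that file
 * pull fields out of the raw little-endian CDW10..CDW15 byte area of a
 * command. This stand-alone user-space version mirrors what
 * nvmet_trace_admin_identify() prints; the sample bytes are made up for
 * demonstration and the memcpy-based helper assumes a little-endian host
 * (the kernel's get_unaligned_le16() is endian-safe).
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

static uint16_t get_le16(const uint8_t *p)
{
	uint16_t v;

	memcpy(&v, p, sizeof(v));	/* unaligned-safe read, LE host assumed */
	return v;
}

int main(void)
{
	/* cdw10[0] = CNS, cdw10[2..3] = CNTID, as in the Identify command */
	uint8_t cdw10[24] = { 0x01, 0x00, 0x05, 0x00 };
	uint8_t cns = cdw10[0];
	uint16_t ctrlid = get_le16(cdw10 + 2);

	printf("cns=%u, ctrlid=%u\n", cns, ctrlid);	/* prints cns=1, ctrlid=5 */
	return 0;
}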
// SPDX-License-Identifier: GPL-2.0 /* * NVMe over Fabrics DH-HMAC-CHAP authentication. * Copyright (c) 2020 Hannes Reinecke, SUSE Software Solutions. * All rights reserved. */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/module.h> #include <linux/init.h> #include <linux/slab.h> #include <linux/err.h> #include <crypto/hash.h> #include <linux/crc32.h> #include <linux/base64.h> #include <linux/ctype.h> #include <linux/random.h> #include <linux/nvme-auth.h> #include <asm/unaligned.h> #include "nvmet.h" int nvmet_auth_set_key(struct nvmet_host *host, const char *secret, bool set_ctrl) { unsigned char key_hash; char *dhchap_secret; if (sscanf(secret, "DHHC-1:%hhd:%*s", &key_hash) != 1) return -EINVAL; if (key_hash > 3) { pr_warn("Invalid DH-HMAC-CHAP hash id %d\n", key_hash); return -EINVAL; } if (key_hash > 0) { /* Validate selected hash algorithm */ const char *hmac = nvme_auth_hmac_name(key_hash); if (!crypto_has_shash(hmac, 0, 0)) { pr_err("DH-HMAC-CHAP hash %s unsupported\n", hmac); return -ENOTSUPP; } } dhchap_secret = kstrdup(secret, GFP_KERNEL); if (!dhchap_secret) return -ENOMEM; if (set_ctrl) { kfree(host->dhchap_ctrl_secret); host->dhchap_ctrl_secret = strim(dhchap_secret); host->dhchap_ctrl_key_hash = key_hash; } else { kfree(host->dhchap_secret); host->dhchap_secret = strim(dhchap_secret); host->dhchap_key_hash = key_hash; } return 0; } int nvmet_setup_dhgroup(struct nvmet_ctrl *ctrl, u8 dhgroup_id) { const char *dhgroup_kpp; int ret = 0; pr_debug("%s: ctrl %d selecting dhgroup %d\n", __func__, ctrl->cntlid, dhgroup_id); if (ctrl->dh_tfm) { if (ctrl->dh_gid == dhgroup_id) { pr_debug("%s: ctrl %d reuse existing DH group %d\n", __func__, ctrl->cntlid, dhgroup_id); return 0; } crypto_free_kpp(ctrl->dh_tfm); ctrl->dh_tfm = NULL; ctrl->dh_gid = 0; } if (dhgroup_id == NVME_AUTH_DHGROUP_NULL) return 0; dhgroup_kpp = nvme_auth_dhgroup_kpp(dhgroup_id); if (!dhgroup_kpp) { pr_debug("%s: ctrl %d invalid DH group %d\n", __func__, ctrl->cntlid, dhgroup_id); return -EINVAL; } ctrl->dh_tfm = crypto_alloc_kpp(dhgroup_kpp, 0, 0); if (IS_ERR(ctrl->dh_tfm)) { pr_debug("%s: ctrl %d failed to setup DH group %d, err %ld\n", __func__, ctrl->cntlid, dhgroup_id, PTR_ERR(ctrl->dh_tfm)); ret = PTR_ERR(ctrl->dh_tfm); ctrl->dh_tfm = NULL; ctrl->dh_gid = 0; } else { ctrl->dh_gid = dhgroup_id; pr_debug("%s: ctrl %d setup DH group %d\n", __func__, ctrl->cntlid, ctrl->dh_gid); ret = nvme_auth_gen_privkey(ctrl->dh_tfm, ctrl->dh_gid); if (ret < 0) { pr_debug("%s: ctrl %d failed to generate private key, err %d\n", __func__, ctrl->cntlid, ret); kfree_sensitive(ctrl->dh_key); return ret; } ctrl->dh_keysize = crypto_kpp_maxsize(ctrl->dh_tfm); kfree_sensitive(ctrl->dh_key); ctrl->dh_key = kzalloc(ctrl->dh_keysize, GFP_KERNEL); if (!ctrl->dh_key) { pr_warn("ctrl %d failed to allocate public key\n", ctrl->cntlid); return -ENOMEM; } ret = nvme_auth_gen_pubkey(ctrl->dh_tfm, ctrl->dh_key, ctrl->dh_keysize); if (ret < 0) { pr_warn("ctrl %d failed to generate public key\n", ctrl->cntlid); kfree(ctrl->dh_key); ctrl->dh_key = NULL; } } return ret; } int nvmet_setup_auth(struct nvmet_ctrl *ctrl) { int ret = 0; struct nvmet_host_link *p; struct nvmet_host *host = NULL; const char *hash_name; down_read(&nvmet_config_sem); if (nvmet_is_disc_subsys(ctrl->subsys)) goto out_unlock; if (ctrl->subsys->allow_any_host) goto out_unlock; list_for_each_entry(p, &ctrl->subsys->hosts, entry) { pr_debug("check %s\n", nvmet_host_name(p->host)); if (strcmp(nvmet_host_name(p->host), ctrl->hostnqn)) continue; host = p->host; break; } 
if (!host) { pr_debug("host %s not found\n", ctrl->hostnqn); ret = -EPERM; goto out_unlock; } ret = nvmet_setup_dhgroup(ctrl, host->dhchap_dhgroup_id); if (ret < 0) pr_warn("Failed to setup DH group"); if (!host->dhchap_secret) { pr_debug("No authentication provided\n"); goto out_unlock; } if (host->dhchap_hash_id == ctrl->shash_id) { pr_debug("Re-use existing hash ID %d\n", ctrl->shash_id); } else { hash_name = nvme_auth_hmac_name(host->dhchap_hash_id); if (!hash_name) { pr_warn("Hash ID %d invalid\n", host->dhchap_hash_id); ret = -EINVAL; goto out_unlock; } ctrl->shash_id = host->dhchap_hash_id; } /* Skip the 'DHHC-1:XX:' prefix */ nvme_auth_free_key(ctrl->host_key); ctrl->host_key = nvme_auth_extract_key(host->dhchap_secret + 10, host->dhchap_key_hash); if (IS_ERR(ctrl->host_key)) { ret = PTR_ERR(ctrl->host_key); ctrl->host_key = NULL; goto out_free_hash; } pr_debug("%s: using hash %s key %*ph\n", __func__, ctrl->host_key->hash > 0 ? nvme_auth_hmac_name(ctrl->host_key->hash) : "none", (int)ctrl->host_key->len, ctrl->host_key->key); nvme_auth_free_key(ctrl->ctrl_key); if (!host->dhchap_ctrl_secret) { ctrl->ctrl_key = NULL; goto out_unlock; } ctrl->ctrl_key = nvme_auth_extract_key(host->dhchap_ctrl_secret + 10, host->dhchap_ctrl_key_hash); if (IS_ERR(ctrl->ctrl_key)) { ret = PTR_ERR(ctrl->ctrl_key); ctrl->ctrl_key = NULL; goto out_free_hash; } pr_debug("%s: using ctrl hash %s key %*ph\n", __func__, ctrl->ctrl_key->hash > 0 ? nvme_auth_hmac_name(ctrl->ctrl_key->hash) : "none", (int)ctrl->ctrl_key->len, ctrl->ctrl_key->key); out_free_hash: if (ret) { if (ctrl->host_key) { nvme_auth_free_key(ctrl->host_key); ctrl->host_key = NULL; } ctrl->shash_id = 0; } out_unlock: up_read(&nvmet_config_sem); return ret; } void nvmet_auth_sq_free(struct nvmet_sq *sq) { cancel_delayed_work(&sq->auth_expired_work); kfree(sq->dhchap_c1); sq->dhchap_c1 = NULL; kfree(sq->dhchap_c2); sq->dhchap_c2 = NULL; kfree(sq->dhchap_skey); sq->dhchap_skey = NULL; } void nvmet_destroy_auth(struct nvmet_ctrl *ctrl) { ctrl->shash_id = 0; if (ctrl->dh_tfm) { crypto_free_kpp(ctrl->dh_tfm); ctrl->dh_tfm = NULL; ctrl->dh_gid = 0; } kfree_sensitive(ctrl->dh_key); ctrl->dh_key = NULL; if (ctrl->host_key) { nvme_auth_free_key(ctrl->host_key); ctrl->host_key = NULL; } if (ctrl->ctrl_key) { nvme_auth_free_key(ctrl->ctrl_key); ctrl->ctrl_key = NULL; } } bool nvmet_check_auth_status(struct nvmet_req *req) { if (req->sq->ctrl->host_key && !req->sq->authenticated) return false; return true; } int nvmet_auth_host_hash(struct nvmet_req *req, u8 *response, unsigned int shash_len) { struct crypto_shash *shash_tfm; struct shash_desc *shash; struct nvmet_ctrl *ctrl = req->sq->ctrl; const char *hash_name; u8 *challenge = req->sq->dhchap_c1, *host_response; u8 buf[4]; int ret; hash_name = nvme_auth_hmac_name(ctrl->shash_id); if (!hash_name) { pr_warn("Hash ID %d invalid\n", ctrl->shash_id); return -EINVAL; } shash_tfm = crypto_alloc_shash(hash_name, 0, 0); if (IS_ERR(shash_tfm)) { pr_err("failed to allocate shash %s\n", hash_name); return PTR_ERR(shash_tfm); } if (shash_len != crypto_shash_digestsize(shash_tfm)) { pr_debug("%s: hash len mismatch (len %d digest %d)\n", __func__, shash_len, crypto_shash_digestsize(shash_tfm)); ret = -EINVAL; goto out_free_tfm; } host_response = nvme_auth_transform_key(ctrl->host_key, ctrl->hostnqn); if (IS_ERR(host_response)) { ret = PTR_ERR(host_response); goto out_free_tfm; } ret = crypto_shash_setkey(shash_tfm, host_response, ctrl->host_key->len); if (ret) goto out_free_response; if (ctrl->dh_gid != 
NVME_AUTH_DHGROUP_NULL) { challenge = kmalloc(shash_len, GFP_KERNEL); if (!challenge) { ret = -ENOMEM; goto out_free_response; } ret = nvme_auth_augmented_challenge(ctrl->shash_id, req->sq->dhchap_skey, req->sq->dhchap_skey_len, req->sq->dhchap_c1, challenge, shash_len); if (ret) goto out_free_response; } pr_debug("ctrl %d qid %d host response seq %u transaction %d\n", ctrl->cntlid, req->sq->qid, req->sq->dhchap_s1, req->sq->dhchap_tid); shash = kzalloc(sizeof(*shash) + crypto_shash_descsize(shash_tfm), GFP_KERNEL); if (!shash) { ret = -ENOMEM; goto out_free_response; } shash->tfm = shash_tfm; ret = crypto_shash_init(shash); if (ret) goto out; ret = crypto_shash_update(shash, challenge, shash_len); if (ret) goto out; put_unaligned_le32(req->sq->dhchap_s1, buf); ret = crypto_shash_update(shash, buf, 4); if (ret) goto out; put_unaligned_le16(req->sq->dhchap_tid, buf); ret = crypto_shash_update(shash, buf, 2); if (ret) goto out; memset(buf, 0, 4); ret = crypto_shash_update(shash, buf, 1); if (ret) goto out; ret = crypto_shash_update(shash, "HostHost", 8); if (ret) goto out; ret = crypto_shash_update(shash, ctrl->hostnqn, strlen(ctrl->hostnqn)); if (ret) goto out; ret = crypto_shash_update(shash, buf, 1); if (ret) goto out; ret = crypto_shash_update(shash, ctrl->subsysnqn, strlen(ctrl->subsysnqn)); if (ret) goto out; ret = crypto_shash_final(shash, response); out: if (challenge != req->sq->dhchap_c1) kfree(challenge); kfree(shash); out_free_response: kfree_sensitive(host_response); out_free_tfm: crypto_free_shash(shash_tfm); return 0; } int nvmet_auth_ctrl_hash(struct nvmet_req *req, u8 *response, unsigned int shash_len) { struct crypto_shash *shash_tfm; struct shash_desc *shash; struct nvmet_ctrl *ctrl = req->sq->ctrl; const char *hash_name; u8 *challenge = req->sq->dhchap_c2, *ctrl_response; u8 buf[4]; int ret; hash_name = nvme_auth_hmac_name(ctrl->shash_id); if (!hash_name) { pr_warn("Hash ID %d invalid\n", ctrl->shash_id); return -EINVAL; } shash_tfm = crypto_alloc_shash(hash_name, 0, 0); if (IS_ERR(shash_tfm)) { pr_err("failed to allocate shash %s\n", hash_name); return PTR_ERR(shash_tfm); } if (shash_len != crypto_shash_digestsize(shash_tfm)) { pr_debug("%s: hash len mismatch (len %d digest %d)\n", __func__, shash_len, crypto_shash_digestsize(shash_tfm)); ret = -EINVAL; goto out_free_tfm; } ctrl_response = nvme_auth_transform_key(ctrl->ctrl_key, ctrl->subsysnqn); if (IS_ERR(ctrl_response)) { ret = PTR_ERR(ctrl_response); goto out_free_tfm; } ret = crypto_shash_setkey(shash_tfm, ctrl_response, ctrl->ctrl_key->len); if (ret) goto out_free_response; if (ctrl->dh_gid != NVME_AUTH_DHGROUP_NULL) { challenge = kmalloc(shash_len, GFP_KERNEL); if (!challenge) { ret = -ENOMEM; goto out_free_response; } ret = nvme_auth_augmented_challenge(ctrl->shash_id, req->sq->dhchap_skey, req->sq->dhchap_skey_len, req->sq->dhchap_c2, challenge, shash_len); if (ret) goto out_free_response; } shash = kzalloc(sizeof(*shash) + crypto_shash_descsize(shash_tfm), GFP_KERNEL); if (!shash) { ret = -ENOMEM; goto out_free_response; } shash->tfm = shash_tfm; ret = crypto_shash_init(shash); if (ret) goto out; ret = crypto_shash_update(shash, challenge, shash_len); if (ret) goto out; put_unaligned_le32(req->sq->dhchap_s2, buf); ret = crypto_shash_update(shash, buf, 4); if (ret) goto out; put_unaligned_le16(req->sq->dhchap_tid, buf); ret = crypto_shash_update(shash, buf, 2); if (ret) goto out; memset(buf, 0, 4); ret = crypto_shash_update(shash, buf, 1); if (ret) goto out; ret = crypto_shash_update(shash, "Controller", 10); if 
(ret) goto out; ret = crypto_shash_update(shash, ctrl->subsysnqn, strlen(ctrl->subsysnqn)); if (ret) goto out; ret = crypto_shash_update(shash, buf, 1); if (ret) goto out; ret = crypto_shash_update(shash, ctrl->hostnqn, strlen(ctrl->hostnqn)); if (ret) goto out; ret = crypto_shash_final(shash, response); out: if (challenge != req->sq->dhchap_c2) kfree(challenge); kfree(shash); out_free_response: kfree_sensitive(ctrl_response); out_free_tfm: crypto_free_shash(shash_tfm); return 0; } int nvmet_auth_ctrl_exponential(struct nvmet_req *req, u8 *buf, int buf_size) { struct nvmet_ctrl *ctrl = req->sq->ctrl; int ret = 0; if (!ctrl->dh_key) { pr_warn("ctrl %d no DH public key!\n", ctrl->cntlid); return -ENOKEY; } if (buf_size != ctrl->dh_keysize) { pr_warn("ctrl %d DH public key size mismatch, need %zu is %d\n", ctrl->cntlid, ctrl->dh_keysize, buf_size); ret = -EINVAL; } else { memcpy(buf, ctrl->dh_key, buf_size); pr_debug("%s: ctrl %d public key %*ph\n", __func__, ctrl->cntlid, (int)buf_size, buf); } return ret; } int nvmet_auth_ctrl_sesskey(struct nvmet_req *req, u8 *pkey, int pkey_size) { struct nvmet_ctrl *ctrl = req->sq->ctrl; int ret; req->sq->dhchap_skey_len = ctrl->dh_keysize; req->sq->dhchap_skey = kzalloc(req->sq->dhchap_skey_len, GFP_KERNEL); if (!req->sq->dhchap_skey) return -ENOMEM; ret = nvme_auth_gen_shared_secret(ctrl->dh_tfm, pkey, pkey_size, req->sq->dhchap_skey, req->sq->dhchap_skey_len); if (ret) pr_debug("failed to compute shared secret, err %d\n", ret); else pr_debug("%s: shared secret %*ph\n", __func__, (int)req->sq->dhchap_skey_len, req->sq->dhchap_skey); return ret; }
linux-master
drivers/nvme/target/auth.c
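/*
 * Illustrative sketch (not part of auth.c above): nvmet_auth_host_hash()
 * computes the DH-HMAC-CHAP host response as an HMAC over the transcript
 * challenge || le32(S1) || le16(TID) || 0x00 || "HostHost" || hostnqn ||
 * 0x00 || subsysnqn, keyed with the transformed secret. This user-space
 * version mirrors that update sequence with OpenSSL's one-shot HMAC();
 * the key, challenge, NQNs and sequence numbers below are placeholders,
 * and SHA-256 is assumed as the negotiated hash.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <openssl/evp.h>
#include <openssl/hmac.h>

int main(void)
{
	uint8_t key[32] = { 0 };		/* transformed DH-HMAC-CHAP key */
	uint8_t challenge[32] = { 0 };		/* C1 (possibly DH-augmented) */
	uint32_t s1 = 1;			/* sequence number from the host */
	uint16_t tid = 1;			/* transaction id */
	const char *hostnqn = "nqn.2014-08.org.nvmexpress:uuid:host";
	const char *subsysnqn = "nqn.2014-08.org.nvmexpress:uuid:subsys";
	uint8_t msg[512], md[32];
	unsigned int mdlen = 0;
	size_t off = 0;

	memcpy(msg + off, challenge, sizeof(challenge));
	off += sizeof(challenge);
	msg[off++] = s1 & 0xff;			/* little-endian 32-bit S1 */
	msg[off++] = (s1 >> 8) & 0xff;
	msg[off++] = (s1 >> 16) & 0xff;
	msg[off++] = (s1 >> 24) & 0xff;
	msg[off++] = tid & 0xff;		/* little-endian 16-bit TID */
	msg[off++] = (tid >> 8) & 0xff;
	msg[off++] = 0;				/* single zero byte */
	memcpy(msg + off, "HostHost", 8);
	off += 8;
	memcpy(msg + off, hostnqn, strlen(hostnqn));
	off += strlen(hostnqn);
	msg[off++] = 0;				/* NQN separator */
	memcpy(msg + off, subsysnqn, strlen(subsysnqn));
	off += strlen(subsysnqn);

	if (!HMAC(EVP_sha256(), key, sizeof(key), msg, off, md, &mdlen))
		return 1;
	for (unsigned int i = 0; i < mdlen; i++)
		printf("%02x", md[i]);
	printf("\n");
	return 0;
}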
// SPDX-License-Identifier: GPL-2.0 /* * NVMe over Fabrics RDMA target. * Copyright (c) 2015-2016 HGST, a Western Digital Company. */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/atomic.h> #include <linux/blk-integrity.h> #include <linux/ctype.h> #include <linux/delay.h> #include <linux/err.h> #include <linux/init.h> #include <linux/module.h> #include <linux/nvme.h> #include <linux/slab.h> #include <linux/string.h> #include <linux/wait.h> #include <linux/inet.h> #include <asm/unaligned.h> #include <rdma/ib_verbs.h> #include <rdma/rdma_cm.h> #include <rdma/rw.h> #include <rdma/ib_cm.h> #include <linux/nvme-rdma.h> #include "nvmet.h" /* * We allow at least 1 page, up to 4 SGEs, and up to 16KB of inline data */ #define NVMET_RDMA_DEFAULT_INLINE_DATA_SIZE PAGE_SIZE #define NVMET_RDMA_MAX_INLINE_SGE 4 #define NVMET_RDMA_MAX_INLINE_DATA_SIZE max_t(int, SZ_16K, PAGE_SIZE) /* Assume mpsmin == device_page_size == 4KB */ #define NVMET_RDMA_MAX_MDTS 8 #define NVMET_RDMA_MAX_METADATA_MDTS 5 struct nvmet_rdma_srq; struct nvmet_rdma_cmd { struct ib_sge sge[NVMET_RDMA_MAX_INLINE_SGE + 1]; struct ib_cqe cqe; struct ib_recv_wr wr; struct scatterlist inline_sg[NVMET_RDMA_MAX_INLINE_SGE]; struct nvme_command *nvme_cmd; struct nvmet_rdma_queue *queue; struct nvmet_rdma_srq *nsrq; }; enum { NVMET_RDMA_REQ_INLINE_DATA = (1 << 0), NVMET_RDMA_REQ_INVALIDATE_RKEY = (1 << 1), }; struct nvmet_rdma_rsp { struct ib_sge send_sge; struct ib_cqe send_cqe; struct ib_send_wr send_wr; struct nvmet_rdma_cmd *cmd; struct nvmet_rdma_queue *queue; struct ib_cqe read_cqe; struct ib_cqe write_cqe; struct rdma_rw_ctx rw; struct nvmet_req req; bool allocated; u8 n_rdma; u32 flags; u32 invalidate_rkey; struct list_head wait_list; struct list_head free_list; }; enum nvmet_rdma_queue_state { NVMET_RDMA_Q_CONNECTING, NVMET_RDMA_Q_LIVE, NVMET_RDMA_Q_DISCONNECTING, }; struct nvmet_rdma_queue { struct rdma_cm_id *cm_id; struct ib_qp *qp; struct nvmet_port *port; struct ib_cq *cq; atomic_t sq_wr_avail; struct nvmet_rdma_device *dev; struct nvmet_rdma_srq *nsrq; spinlock_t state_lock; enum nvmet_rdma_queue_state state; struct nvmet_cq nvme_cq; struct nvmet_sq nvme_sq; struct nvmet_rdma_rsp *rsps; struct list_head free_rsps; spinlock_t rsps_lock; struct nvmet_rdma_cmd *cmds; struct work_struct release_work; struct list_head rsp_wait_list; struct list_head rsp_wr_wait_list; spinlock_t rsp_wr_wait_lock; int idx; int host_qid; int comp_vector; int recv_queue_size; int send_queue_size; struct list_head queue_list; }; struct nvmet_rdma_port { struct nvmet_port *nport; struct sockaddr_storage addr; struct rdma_cm_id *cm_id; struct delayed_work repair_work; }; struct nvmet_rdma_srq { struct ib_srq *srq; struct nvmet_rdma_cmd *cmds; struct nvmet_rdma_device *ndev; }; struct nvmet_rdma_device { struct ib_device *device; struct ib_pd *pd; struct nvmet_rdma_srq **srqs; int srq_count; size_t srq_size; struct kref ref; struct list_head entry; int inline_data_size; int inline_page_count; }; static bool nvmet_rdma_use_srq; module_param_named(use_srq, nvmet_rdma_use_srq, bool, 0444); MODULE_PARM_DESC(use_srq, "Use shared receive queue."); static int srq_size_set(const char *val, const struct kernel_param *kp); static const struct kernel_param_ops srq_size_ops = { .set = srq_size_set, .get = param_get_int, }; static int nvmet_rdma_srq_size = 1024; module_param_cb(srq_size, &srq_size_ops, &nvmet_rdma_srq_size, 0644); MODULE_PARM_DESC(srq_size, "set Shared Receive Queue (SRQ) size, should >= 256 (default: 1024)"); static 
DEFINE_IDA(nvmet_rdma_queue_ida); static LIST_HEAD(nvmet_rdma_queue_list); static DEFINE_MUTEX(nvmet_rdma_queue_mutex); static LIST_HEAD(device_list); static DEFINE_MUTEX(device_list_mutex); static bool nvmet_rdma_execute_command(struct nvmet_rdma_rsp *rsp); static void nvmet_rdma_send_done(struct ib_cq *cq, struct ib_wc *wc); static void nvmet_rdma_recv_done(struct ib_cq *cq, struct ib_wc *wc); static void nvmet_rdma_read_data_done(struct ib_cq *cq, struct ib_wc *wc); static void nvmet_rdma_write_data_done(struct ib_cq *cq, struct ib_wc *wc); static void nvmet_rdma_qp_event(struct ib_event *event, void *priv); static void nvmet_rdma_queue_disconnect(struct nvmet_rdma_queue *queue); static void nvmet_rdma_free_rsp(struct nvmet_rdma_device *ndev, struct nvmet_rdma_rsp *r); static int nvmet_rdma_alloc_rsp(struct nvmet_rdma_device *ndev, struct nvmet_rdma_rsp *r); static const struct nvmet_fabrics_ops nvmet_rdma_ops; static int srq_size_set(const char *val, const struct kernel_param *kp) { int n = 0, ret; ret = kstrtoint(val, 10, &n); if (ret != 0 || n < 256) return -EINVAL; return param_set_int(val, kp); } static int num_pages(int len) { return 1 + (((len - 1) & PAGE_MASK) >> PAGE_SHIFT); } static inline bool nvmet_rdma_need_data_in(struct nvmet_rdma_rsp *rsp) { return nvme_is_write(rsp->req.cmd) && rsp->req.transfer_len && !(rsp->flags & NVMET_RDMA_REQ_INLINE_DATA); } static inline bool nvmet_rdma_need_data_out(struct nvmet_rdma_rsp *rsp) { return !nvme_is_write(rsp->req.cmd) && rsp->req.transfer_len && !rsp->req.cqe->status && !(rsp->flags & NVMET_RDMA_REQ_INLINE_DATA); } static inline struct nvmet_rdma_rsp * nvmet_rdma_get_rsp(struct nvmet_rdma_queue *queue) { struct nvmet_rdma_rsp *rsp; unsigned long flags; spin_lock_irqsave(&queue->rsps_lock, flags); rsp = list_first_entry_or_null(&queue->free_rsps, struct nvmet_rdma_rsp, free_list); if (likely(rsp)) list_del(&rsp->free_list); spin_unlock_irqrestore(&queue->rsps_lock, flags); if (unlikely(!rsp)) { int ret; rsp = kzalloc(sizeof(*rsp), GFP_KERNEL); if (unlikely(!rsp)) return NULL; ret = nvmet_rdma_alloc_rsp(queue->dev, rsp); if (unlikely(ret)) { kfree(rsp); return NULL; } rsp->allocated = true; } return rsp; } static inline void nvmet_rdma_put_rsp(struct nvmet_rdma_rsp *rsp) { unsigned long flags; if (unlikely(rsp->allocated)) { nvmet_rdma_free_rsp(rsp->queue->dev, rsp); kfree(rsp); return; } spin_lock_irqsave(&rsp->queue->rsps_lock, flags); list_add_tail(&rsp->free_list, &rsp->queue->free_rsps); spin_unlock_irqrestore(&rsp->queue->rsps_lock, flags); } static void nvmet_rdma_free_inline_pages(struct nvmet_rdma_device *ndev, struct nvmet_rdma_cmd *c) { struct scatterlist *sg; struct ib_sge *sge; int i; if (!ndev->inline_data_size) return; sg = c->inline_sg; sge = &c->sge[1]; for (i = 0; i < ndev->inline_page_count; i++, sg++, sge++) { if (sge->length) ib_dma_unmap_page(ndev->device, sge->addr, sge->length, DMA_FROM_DEVICE); if (sg_page(sg)) __free_page(sg_page(sg)); } } static int nvmet_rdma_alloc_inline_pages(struct nvmet_rdma_device *ndev, struct nvmet_rdma_cmd *c) { struct scatterlist *sg; struct ib_sge *sge; struct page *pg; int len; int i; if (!ndev->inline_data_size) return 0; sg = c->inline_sg; sg_init_table(sg, ndev->inline_page_count); sge = &c->sge[1]; len = ndev->inline_data_size; for (i = 0; i < ndev->inline_page_count; i++, sg++, sge++) { pg = alloc_page(GFP_KERNEL); if (!pg) goto out_err; sg_assign_page(sg, pg); sge->addr = ib_dma_map_page(ndev->device, pg, 0, PAGE_SIZE, DMA_FROM_DEVICE); if 
(ib_dma_mapping_error(ndev->device, sge->addr)) goto out_err; sge->length = min_t(int, len, PAGE_SIZE); sge->lkey = ndev->pd->local_dma_lkey; len -= sge->length; } return 0; out_err: for (; i >= 0; i--, sg--, sge--) { if (sge->length) ib_dma_unmap_page(ndev->device, sge->addr, sge->length, DMA_FROM_DEVICE); if (sg_page(sg)) __free_page(sg_page(sg)); } return -ENOMEM; } static int nvmet_rdma_alloc_cmd(struct nvmet_rdma_device *ndev, struct nvmet_rdma_cmd *c, bool admin) { /* NVMe command / RDMA RECV */ c->nvme_cmd = kmalloc(sizeof(*c->nvme_cmd), GFP_KERNEL); if (!c->nvme_cmd) goto out; c->sge[0].addr = ib_dma_map_single(ndev->device, c->nvme_cmd, sizeof(*c->nvme_cmd), DMA_FROM_DEVICE); if (ib_dma_mapping_error(ndev->device, c->sge[0].addr)) goto out_free_cmd; c->sge[0].length = sizeof(*c->nvme_cmd); c->sge[0].lkey = ndev->pd->local_dma_lkey; if (!admin && nvmet_rdma_alloc_inline_pages(ndev, c)) goto out_unmap_cmd; c->cqe.done = nvmet_rdma_recv_done; c->wr.wr_cqe = &c->cqe; c->wr.sg_list = c->sge; c->wr.num_sge = admin ? 1 : ndev->inline_page_count + 1; return 0; out_unmap_cmd: ib_dma_unmap_single(ndev->device, c->sge[0].addr, sizeof(*c->nvme_cmd), DMA_FROM_DEVICE); out_free_cmd: kfree(c->nvme_cmd); out: return -ENOMEM; } static void nvmet_rdma_free_cmd(struct nvmet_rdma_device *ndev, struct nvmet_rdma_cmd *c, bool admin) { if (!admin) nvmet_rdma_free_inline_pages(ndev, c); ib_dma_unmap_single(ndev->device, c->sge[0].addr, sizeof(*c->nvme_cmd), DMA_FROM_DEVICE); kfree(c->nvme_cmd); } static struct nvmet_rdma_cmd * nvmet_rdma_alloc_cmds(struct nvmet_rdma_device *ndev, int nr_cmds, bool admin) { struct nvmet_rdma_cmd *cmds; int ret = -EINVAL, i; cmds = kcalloc(nr_cmds, sizeof(struct nvmet_rdma_cmd), GFP_KERNEL); if (!cmds) goto out; for (i = 0; i < nr_cmds; i++) { ret = nvmet_rdma_alloc_cmd(ndev, cmds + i, admin); if (ret) goto out_free; } return cmds; out_free: while (--i >= 0) nvmet_rdma_free_cmd(ndev, cmds + i, admin); kfree(cmds); out: return ERR_PTR(ret); } static void nvmet_rdma_free_cmds(struct nvmet_rdma_device *ndev, struct nvmet_rdma_cmd *cmds, int nr_cmds, bool admin) { int i; for (i = 0; i < nr_cmds; i++) nvmet_rdma_free_cmd(ndev, cmds + i, admin); kfree(cmds); } static int nvmet_rdma_alloc_rsp(struct nvmet_rdma_device *ndev, struct nvmet_rdma_rsp *r) { /* NVMe CQE / RDMA SEND */ r->req.cqe = kmalloc(sizeof(*r->req.cqe), GFP_KERNEL); if (!r->req.cqe) goto out; r->send_sge.addr = ib_dma_map_single(ndev->device, r->req.cqe, sizeof(*r->req.cqe), DMA_TO_DEVICE); if (ib_dma_mapping_error(ndev->device, r->send_sge.addr)) goto out_free_rsp; if (ib_dma_pci_p2p_dma_supported(ndev->device)) r->req.p2p_client = &ndev->device->dev; r->send_sge.length = sizeof(*r->req.cqe); r->send_sge.lkey = ndev->pd->local_dma_lkey; r->send_cqe.done = nvmet_rdma_send_done; r->send_wr.wr_cqe = &r->send_cqe; r->send_wr.sg_list = &r->send_sge; r->send_wr.num_sge = 1; r->send_wr.send_flags = IB_SEND_SIGNALED; /* Data In / RDMA READ */ r->read_cqe.done = nvmet_rdma_read_data_done; /* Data Out / RDMA WRITE */ r->write_cqe.done = nvmet_rdma_write_data_done; return 0; out_free_rsp: kfree(r->req.cqe); out: return -ENOMEM; } static void nvmet_rdma_free_rsp(struct nvmet_rdma_device *ndev, struct nvmet_rdma_rsp *r) { ib_dma_unmap_single(ndev->device, r->send_sge.addr, sizeof(*r->req.cqe), DMA_TO_DEVICE); kfree(r->req.cqe); } static int nvmet_rdma_alloc_rsps(struct nvmet_rdma_queue *queue) { struct nvmet_rdma_device *ndev = queue->dev; int nr_rsps = queue->recv_queue_size * 2; int ret = -EINVAL, i; queue->rsps = 
kcalloc(nr_rsps, sizeof(struct nvmet_rdma_rsp), GFP_KERNEL); if (!queue->rsps) goto out; for (i = 0; i < nr_rsps; i++) { struct nvmet_rdma_rsp *rsp = &queue->rsps[i]; ret = nvmet_rdma_alloc_rsp(ndev, rsp); if (ret) goto out_free; list_add_tail(&rsp->free_list, &queue->free_rsps); } return 0; out_free: while (--i >= 0) { struct nvmet_rdma_rsp *rsp = &queue->rsps[i]; list_del(&rsp->free_list); nvmet_rdma_free_rsp(ndev, rsp); } kfree(queue->rsps); out: return ret; } static void nvmet_rdma_free_rsps(struct nvmet_rdma_queue *queue) { struct nvmet_rdma_device *ndev = queue->dev; int i, nr_rsps = queue->recv_queue_size * 2; for (i = 0; i < nr_rsps; i++) { struct nvmet_rdma_rsp *rsp = &queue->rsps[i]; list_del(&rsp->free_list); nvmet_rdma_free_rsp(ndev, rsp); } kfree(queue->rsps); } static int nvmet_rdma_post_recv(struct nvmet_rdma_device *ndev, struct nvmet_rdma_cmd *cmd) { int ret; ib_dma_sync_single_for_device(ndev->device, cmd->sge[0].addr, cmd->sge[0].length, DMA_FROM_DEVICE); if (cmd->nsrq) ret = ib_post_srq_recv(cmd->nsrq->srq, &cmd->wr, NULL); else ret = ib_post_recv(cmd->queue->qp, &cmd->wr, NULL); if (unlikely(ret)) pr_err("post_recv cmd failed\n"); return ret; } static void nvmet_rdma_process_wr_wait_list(struct nvmet_rdma_queue *queue) { spin_lock(&queue->rsp_wr_wait_lock); while (!list_empty(&queue->rsp_wr_wait_list)) { struct nvmet_rdma_rsp *rsp; bool ret; rsp = list_entry(queue->rsp_wr_wait_list.next, struct nvmet_rdma_rsp, wait_list); list_del(&rsp->wait_list); spin_unlock(&queue->rsp_wr_wait_lock); ret = nvmet_rdma_execute_command(rsp); spin_lock(&queue->rsp_wr_wait_lock); if (!ret) { list_add(&rsp->wait_list, &queue->rsp_wr_wait_list); break; } } spin_unlock(&queue->rsp_wr_wait_lock); } static u16 nvmet_rdma_check_pi_status(struct ib_mr *sig_mr) { struct ib_mr_status mr_status; int ret; u16 status = 0; ret = ib_check_mr_status(sig_mr, IB_MR_CHECK_SIG_STATUS, &mr_status); if (ret) { pr_err("ib_check_mr_status failed, ret %d\n", ret); return NVME_SC_INVALID_PI; } if (mr_status.fail_status & IB_MR_CHECK_SIG_STATUS) { switch (mr_status.sig_err.err_type) { case IB_SIG_BAD_GUARD: status = NVME_SC_GUARD_CHECK; break; case IB_SIG_BAD_REFTAG: status = NVME_SC_REFTAG_CHECK; break; case IB_SIG_BAD_APPTAG: status = NVME_SC_APPTAG_CHECK; break; } pr_err("PI error found type %d expected 0x%x vs actual 0x%x\n", mr_status.sig_err.err_type, mr_status.sig_err.expected, mr_status.sig_err.actual); } return status; } static void nvmet_rdma_set_sig_domain(struct blk_integrity *bi, struct nvme_command *cmd, struct ib_sig_domain *domain, u16 control, u8 pi_type) { domain->sig_type = IB_SIG_TYPE_T10_DIF; domain->sig.dif.bg_type = IB_T10DIF_CRC; domain->sig.dif.pi_interval = 1 << bi->interval_exp; domain->sig.dif.ref_tag = le32_to_cpu(cmd->rw.reftag); if (control & NVME_RW_PRINFO_PRCHK_REF) domain->sig.dif.ref_remap = true; domain->sig.dif.app_tag = le16_to_cpu(cmd->rw.apptag); domain->sig.dif.apptag_check_mask = le16_to_cpu(cmd->rw.appmask); domain->sig.dif.app_escape = true; if (pi_type == NVME_NS_DPS_PI_TYPE3) domain->sig.dif.ref_escape = true; } static void nvmet_rdma_set_sig_attrs(struct nvmet_req *req, struct ib_sig_attrs *sig_attrs) { struct nvme_command *cmd = req->cmd; u16 control = le16_to_cpu(cmd->rw.control); u8 pi_type = req->ns->pi_type; struct blk_integrity *bi; bi = bdev_get_integrity(req->ns->bdev); memset(sig_attrs, 0, sizeof(*sig_attrs)); if (control & NVME_RW_PRINFO_PRACT) { /* for WRITE_INSERT/READ_STRIP no wire domain */ sig_attrs->wire.sig_type = IB_SIG_TYPE_NONE; 
nvmet_rdma_set_sig_domain(bi, cmd, &sig_attrs->mem, control, pi_type); /* Clear the PRACT bit since HCA will generate/verify the PI */ control &= ~NVME_RW_PRINFO_PRACT; cmd->rw.control = cpu_to_le16(control); /* PI is added by the HW */ req->transfer_len += req->metadata_len; } else { /* for WRITE_PASS/READ_PASS both wire/memory domains exist */ nvmet_rdma_set_sig_domain(bi, cmd, &sig_attrs->wire, control, pi_type); nvmet_rdma_set_sig_domain(bi, cmd, &sig_attrs->mem, control, pi_type); } if (control & NVME_RW_PRINFO_PRCHK_REF) sig_attrs->check_mask |= IB_SIG_CHECK_REFTAG; if (control & NVME_RW_PRINFO_PRCHK_GUARD) sig_attrs->check_mask |= IB_SIG_CHECK_GUARD; if (control & NVME_RW_PRINFO_PRCHK_APP) sig_attrs->check_mask |= IB_SIG_CHECK_APPTAG; } static int nvmet_rdma_rw_ctx_init(struct nvmet_rdma_rsp *rsp, u64 addr, u32 key, struct ib_sig_attrs *sig_attrs) { struct rdma_cm_id *cm_id = rsp->queue->cm_id; struct nvmet_req *req = &rsp->req; int ret; if (req->metadata_len) ret = rdma_rw_ctx_signature_init(&rsp->rw, cm_id->qp, cm_id->port_num, req->sg, req->sg_cnt, req->metadata_sg, req->metadata_sg_cnt, sig_attrs, addr, key, nvmet_data_dir(req)); else ret = rdma_rw_ctx_init(&rsp->rw, cm_id->qp, cm_id->port_num, req->sg, req->sg_cnt, 0, addr, key, nvmet_data_dir(req)); return ret; } static void nvmet_rdma_rw_ctx_destroy(struct nvmet_rdma_rsp *rsp) { struct rdma_cm_id *cm_id = rsp->queue->cm_id; struct nvmet_req *req = &rsp->req; if (req->metadata_len) rdma_rw_ctx_destroy_signature(&rsp->rw, cm_id->qp, cm_id->port_num, req->sg, req->sg_cnt, req->metadata_sg, req->metadata_sg_cnt, nvmet_data_dir(req)); else rdma_rw_ctx_destroy(&rsp->rw, cm_id->qp, cm_id->port_num, req->sg, req->sg_cnt, nvmet_data_dir(req)); } static void nvmet_rdma_release_rsp(struct nvmet_rdma_rsp *rsp) { struct nvmet_rdma_queue *queue = rsp->queue; atomic_add(1 + rsp->n_rdma, &queue->sq_wr_avail); if (rsp->n_rdma) nvmet_rdma_rw_ctx_destroy(rsp); if (rsp->req.sg != rsp->cmd->inline_sg) nvmet_req_free_sgls(&rsp->req); if (unlikely(!list_empty_careful(&queue->rsp_wr_wait_list))) nvmet_rdma_process_wr_wait_list(queue); nvmet_rdma_put_rsp(rsp); } static void nvmet_rdma_error_comp(struct nvmet_rdma_queue *queue) { if (queue->nvme_sq.ctrl) { nvmet_ctrl_fatal_error(queue->nvme_sq.ctrl); } else { /* * we didn't setup the controller yet in case * of admin connect error, just disconnect and * cleanup the queue */ nvmet_rdma_queue_disconnect(queue); } } static void nvmet_rdma_send_done(struct ib_cq *cq, struct ib_wc *wc) { struct nvmet_rdma_rsp *rsp = container_of(wc->wr_cqe, struct nvmet_rdma_rsp, send_cqe); struct nvmet_rdma_queue *queue = wc->qp->qp_context; nvmet_rdma_release_rsp(rsp); if (unlikely(wc->status != IB_WC_SUCCESS && wc->status != IB_WC_WR_FLUSH_ERR)) { pr_err("SEND for CQE 0x%p failed with status %s (%d).\n", wc->wr_cqe, ib_wc_status_msg(wc->status), wc->status); nvmet_rdma_error_comp(queue); } } static void nvmet_rdma_queue_response(struct nvmet_req *req) { struct nvmet_rdma_rsp *rsp = container_of(req, struct nvmet_rdma_rsp, req); struct rdma_cm_id *cm_id = rsp->queue->cm_id; struct ib_send_wr *first_wr; if (rsp->flags & NVMET_RDMA_REQ_INVALIDATE_RKEY) { rsp->send_wr.opcode = IB_WR_SEND_WITH_INV; rsp->send_wr.ex.invalidate_rkey = rsp->invalidate_rkey; } else { rsp->send_wr.opcode = IB_WR_SEND; } if (nvmet_rdma_need_data_out(rsp)) { if (rsp->req.metadata_len) first_wr = rdma_rw_ctx_wrs(&rsp->rw, cm_id->qp, cm_id->port_num, &rsp->write_cqe, NULL); else first_wr = rdma_rw_ctx_wrs(&rsp->rw, cm_id->qp, cm_id->port_num, NULL, 
&rsp->send_wr); } else { first_wr = &rsp->send_wr; } nvmet_rdma_post_recv(rsp->queue->dev, rsp->cmd); ib_dma_sync_single_for_device(rsp->queue->dev->device, rsp->send_sge.addr, rsp->send_sge.length, DMA_TO_DEVICE); if (unlikely(ib_post_send(cm_id->qp, first_wr, NULL))) { pr_err("sending cmd response failed\n"); nvmet_rdma_release_rsp(rsp); } } static void nvmet_rdma_read_data_done(struct ib_cq *cq, struct ib_wc *wc) { struct nvmet_rdma_rsp *rsp = container_of(wc->wr_cqe, struct nvmet_rdma_rsp, read_cqe); struct nvmet_rdma_queue *queue = wc->qp->qp_context; u16 status = 0; WARN_ON(rsp->n_rdma <= 0); atomic_add(rsp->n_rdma, &queue->sq_wr_avail); rsp->n_rdma = 0; if (unlikely(wc->status != IB_WC_SUCCESS)) { nvmet_rdma_rw_ctx_destroy(rsp); nvmet_req_uninit(&rsp->req); nvmet_rdma_release_rsp(rsp); if (wc->status != IB_WC_WR_FLUSH_ERR) { pr_info("RDMA READ for CQE 0x%p failed with status %s (%d).\n", wc->wr_cqe, ib_wc_status_msg(wc->status), wc->status); nvmet_rdma_error_comp(queue); } return; } if (rsp->req.metadata_len) status = nvmet_rdma_check_pi_status(rsp->rw.reg->mr); nvmet_rdma_rw_ctx_destroy(rsp); if (unlikely(status)) nvmet_req_complete(&rsp->req, status); else rsp->req.execute(&rsp->req); } static void nvmet_rdma_write_data_done(struct ib_cq *cq, struct ib_wc *wc) { struct nvmet_rdma_rsp *rsp = container_of(wc->wr_cqe, struct nvmet_rdma_rsp, write_cqe); struct nvmet_rdma_queue *queue = wc->qp->qp_context; struct rdma_cm_id *cm_id = rsp->queue->cm_id; u16 status; if (!IS_ENABLED(CONFIG_BLK_DEV_INTEGRITY)) return; WARN_ON(rsp->n_rdma <= 0); atomic_add(rsp->n_rdma, &queue->sq_wr_avail); rsp->n_rdma = 0; if (unlikely(wc->status != IB_WC_SUCCESS)) { nvmet_rdma_rw_ctx_destroy(rsp); nvmet_req_uninit(&rsp->req); nvmet_rdma_release_rsp(rsp); if (wc->status != IB_WC_WR_FLUSH_ERR) { pr_info("RDMA WRITE for CQE failed with status %s (%d).\n", ib_wc_status_msg(wc->status), wc->status); nvmet_rdma_error_comp(queue); } return; } /* * Upon RDMA completion check the signature status * - if succeeded send good NVMe response * - if failed send bad NVMe response with appropriate error */ status = nvmet_rdma_check_pi_status(rsp->rw.reg->mr); if (unlikely(status)) rsp->req.cqe->status = cpu_to_le16(status << 1); nvmet_rdma_rw_ctx_destroy(rsp); if (unlikely(ib_post_send(cm_id->qp, &rsp->send_wr, NULL))) { pr_err("sending cmd response failed\n"); nvmet_rdma_release_rsp(rsp); } } static void nvmet_rdma_use_inline_sg(struct nvmet_rdma_rsp *rsp, u32 len, u64 off) { int sg_count = num_pages(len); struct scatterlist *sg; int i; sg = rsp->cmd->inline_sg; for (i = 0; i < sg_count; i++, sg++) { if (i < sg_count - 1) sg_unmark_end(sg); else sg_mark_end(sg); sg->offset = off; sg->length = min_t(int, len, PAGE_SIZE - off); len -= sg->length; if (!i) off = 0; } rsp->req.sg = rsp->cmd->inline_sg; rsp->req.sg_cnt = sg_count; } static u16 nvmet_rdma_map_sgl_inline(struct nvmet_rdma_rsp *rsp) { struct nvme_sgl_desc *sgl = &rsp->req.cmd->common.dptr.sgl; u64 off = le64_to_cpu(sgl->addr); u32 len = le32_to_cpu(sgl->length); if (!nvme_is_write(rsp->req.cmd)) { rsp->req.error_loc = offsetof(struct nvme_common_command, opcode); return NVME_SC_INVALID_FIELD | NVME_SC_DNR; } if (off + len > rsp->queue->dev->inline_data_size) { pr_err("invalid inline data offset!\n"); return NVME_SC_SGL_INVALID_OFFSET | NVME_SC_DNR; } /* no data command? 
*/ if (!len) return 0; nvmet_rdma_use_inline_sg(rsp, len, off); rsp->flags |= NVMET_RDMA_REQ_INLINE_DATA; rsp->req.transfer_len += len; return 0; } static u16 nvmet_rdma_map_sgl_keyed(struct nvmet_rdma_rsp *rsp, struct nvme_keyed_sgl_desc *sgl, bool invalidate) { u64 addr = le64_to_cpu(sgl->addr); u32 key = get_unaligned_le32(sgl->key); struct ib_sig_attrs sig_attrs; int ret; rsp->req.transfer_len = get_unaligned_le24(sgl->length); /* no data command? */ if (!rsp->req.transfer_len) return 0; if (rsp->req.metadata_len) nvmet_rdma_set_sig_attrs(&rsp->req, &sig_attrs); ret = nvmet_req_alloc_sgls(&rsp->req); if (unlikely(ret < 0)) goto error_out; ret = nvmet_rdma_rw_ctx_init(rsp, addr, key, &sig_attrs); if (unlikely(ret < 0)) goto error_out; rsp->n_rdma += ret; if (invalidate) { rsp->invalidate_rkey = key; rsp->flags |= NVMET_RDMA_REQ_INVALIDATE_RKEY; } return 0; error_out: rsp->req.transfer_len = 0; return NVME_SC_INTERNAL; } static u16 nvmet_rdma_map_sgl(struct nvmet_rdma_rsp *rsp) { struct nvme_keyed_sgl_desc *sgl = &rsp->req.cmd->common.dptr.ksgl; switch (sgl->type >> 4) { case NVME_SGL_FMT_DATA_DESC: switch (sgl->type & 0xf) { case NVME_SGL_FMT_OFFSET: return nvmet_rdma_map_sgl_inline(rsp); default: pr_err("invalid SGL subtype: %#x\n", sgl->type); rsp->req.error_loc = offsetof(struct nvme_common_command, dptr); return NVME_SC_INVALID_FIELD | NVME_SC_DNR; } case NVME_KEY_SGL_FMT_DATA_DESC: switch (sgl->type & 0xf) { case NVME_SGL_FMT_ADDRESS | NVME_SGL_FMT_INVALIDATE: return nvmet_rdma_map_sgl_keyed(rsp, sgl, true); case NVME_SGL_FMT_ADDRESS: return nvmet_rdma_map_sgl_keyed(rsp, sgl, false); default: pr_err("invalid SGL subtype: %#x\n", sgl->type); rsp->req.error_loc = offsetof(struct nvme_common_command, dptr); return NVME_SC_INVALID_FIELD | NVME_SC_DNR; } default: pr_err("invalid SGL type: %#x\n", sgl->type); rsp->req.error_loc = offsetof(struct nvme_common_command, dptr); return NVME_SC_SGL_INVALID_TYPE | NVME_SC_DNR; } } static bool nvmet_rdma_execute_command(struct nvmet_rdma_rsp *rsp) { struct nvmet_rdma_queue *queue = rsp->queue; if (unlikely(atomic_sub_return(1 + rsp->n_rdma, &queue->sq_wr_avail) < 0)) { pr_debug("IB send queue full (needed %d): queue %u cntlid %u\n", 1 + rsp->n_rdma, queue->idx, queue->nvme_sq.ctrl->cntlid); atomic_add(1 + rsp->n_rdma, &queue->sq_wr_avail); return false; } if (nvmet_rdma_need_data_in(rsp)) { if (rdma_rw_ctx_post(&rsp->rw, queue->qp, queue->cm_id->port_num, &rsp->read_cqe, NULL)) nvmet_req_complete(&rsp->req, NVME_SC_DATA_XFER_ERROR); } else { rsp->req.execute(&rsp->req); } return true; } static void nvmet_rdma_handle_command(struct nvmet_rdma_queue *queue, struct nvmet_rdma_rsp *cmd) { u16 status; ib_dma_sync_single_for_cpu(queue->dev->device, cmd->cmd->sge[0].addr, cmd->cmd->sge[0].length, DMA_FROM_DEVICE); ib_dma_sync_single_for_cpu(queue->dev->device, cmd->send_sge.addr, cmd->send_sge.length, DMA_TO_DEVICE); if (!nvmet_req_init(&cmd->req, &queue->nvme_cq, &queue->nvme_sq, &nvmet_rdma_ops)) return; status = nvmet_rdma_map_sgl(cmd); if (status) goto out_err; if (unlikely(!nvmet_rdma_execute_command(cmd))) { spin_lock(&queue->rsp_wr_wait_lock); list_add_tail(&cmd->wait_list, &queue->rsp_wr_wait_list); spin_unlock(&queue->rsp_wr_wait_lock); } return; out_err: nvmet_req_complete(&cmd->req, status); } static void nvmet_rdma_recv_done(struct ib_cq *cq, struct ib_wc *wc) { struct nvmet_rdma_cmd *cmd = container_of(wc->wr_cqe, struct nvmet_rdma_cmd, cqe); struct nvmet_rdma_queue *queue = wc->qp->qp_context; struct nvmet_rdma_rsp *rsp; if 
(unlikely(wc->status != IB_WC_SUCCESS)) { if (wc->status != IB_WC_WR_FLUSH_ERR) { pr_err("RECV for CQE 0x%p failed with status %s (%d)\n", wc->wr_cqe, ib_wc_status_msg(wc->status), wc->status); nvmet_rdma_error_comp(queue); } return; } if (unlikely(wc->byte_len < sizeof(struct nvme_command))) { pr_err("Ctrl Fatal Error: capsule size less than 64 bytes\n"); nvmet_rdma_error_comp(queue); return; } cmd->queue = queue; rsp = nvmet_rdma_get_rsp(queue); if (unlikely(!rsp)) { /* * we get here only under memory pressure, * silently drop and have the host retry * as we can't even fail it. */ nvmet_rdma_post_recv(queue->dev, cmd); return; } rsp->queue = queue; rsp->cmd = cmd; rsp->flags = 0; rsp->req.cmd = cmd->nvme_cmd; rsp->req.port = queue->port; rsp->n_rdma = 0; if (unlikely(queue->state != NVMET_RDMA_Q_LIVE)) { unsigned long flags; spin_lock_irqsave(&queue->state_lock, flags); if (queue->state == NVMET_RDMA_Q_CONNECTING) list_add_tail(&rsp->wait_list, &queue->rsp_wait_list); else nvmet_rdma_put_rsp(rsp); spin_unlock_irqrestore(&queue->state_lock, flags); return; } nvmet_rdma_handle_command(queue, rsp); } static void nvmet_rdma_destroy_srq(struct nvmet_rdma_srq *nsrq) { nvmet_rdma_free_cmds(nsrq->ndev, nsrq->cmds, nsrq->ndev->srq_size, false); ib_destroy_srq(nsrq->srq); kfree(nsrq); } static void nvmet_rdma_destroy_srqs(struct nvmet_rdma_device *ndev) { int i; if (!ndev->srqs) return; for (i = 0; i < ndev->srq_count; i++) nvmet_rdma_destroy_srq(ndev->srqs[i]); kfree(ndev->srqs); } static struct nvmet_rdma_srq * nvmet_rdma_init_srq(struct nvmet_rdma_device *ndev) { struct ib_srq_init_attr srq_attr = { NULL, }; size_t srq_size = ndev->srq_size; struct nvmet_rdma_srq *nsrq; struct ib_srq *srq; int ret, i; nsrq = kzalloc(sizeof(*nsrq), GFP_KERNEL); if (!nsrq) return ERR_PTR(-ENOMEM); srq_attr.attr.max_wr = srq_size; srq_attr.attr.max_sge = 1 + ndev->inline_page_count; srq_attr.attr.srq_limit = 0; srq_attr.srq_type = IB_SRQT_BASIC; srq = ib_create_srq(ndev->pd, &srq_attr); if (IS_ERR(srq)) { ret = PTR_ERR(srq); goto out_free; } nsrq->cmds = nvmet_rdma_alloc_cmds(ndev, srq_size, false); if (IS_ERR(nsrq->cmds)) { ret = PTR_ERR(nsrq->cmds); goto out_destroy_srq; } nsrq->srq = srq; nsrq->ndev = ndev; for (i = 0; i < srq_size; i++) { nsrq->cmds[i].nsrq = nsrq; ret = nvmet_rdma_post_recv(ndev, &nsrq->cmds[i]); if (ret) goto out_free_cmds; } return nsrq; out_free_cmds: nvmet_rdma_free_cmds(ndev, nsrq->cmds, srq_size, false); out_destroy_srq: ib_destroy_srq(srq); out_free: kfree(nsrq); return ERR_PTR(ret); } static int nvmet_rdma_init_srqs(struct nvmet_rdma_device *ndev) { int i, ret; if (!ndev->device->attrs.max_srq_wr || !ndev->device->attrs.max_srq) { /* * If SRQs aren't supported we just go ahead and use normal * non-shared receive queues. 
*/ pr_info("SRQ requested but not supported.\n"); return 0; } ndev->srq_size = min(ndev->device->attrs.max_srq_wr, nvmet_rdma_srq_size); ndev->srq_count = min(ndev->device->num_comp_vectors, ndev->device->attrs.max_srq); ndev->srqs = kcalloc(ndev->srq_count, sizeof(*ndev->srqs), GFP_KERNEL); if (!ndev->srqs) return -ENOMEM; for (i = 0; i < ndev->srq_count; i++) { ndev->srqs[i] = nvmet_rdma_init_srq(ndev); if (IS_ERR(ndev->srqs[i])) { ret = PTR_ERR(ndev->srqs[i]); goto err_srq; } } return 0; err_srq: while (--i >= 0) nvmet_rdma_destroy_srq(ndev->srqs[i]); kfree(ndev->srqs); return ret; } static void nvmet_rdma_free_dev(struct kref *ref) { struct nvmet_rdma_device *ndev = container_of(ref, struct nvmet_rdma_device, ref); mutex_lock(&device_list_mutex); list_del(&ndev->entry); mutex_unlock(&device_list_mutex); nvmet_rdma_destroy_srqs(ndev); ib_dealloc_pd(ndev->pd); kfree(ndev); } static struct nvmet_rdma_device * nvmet_rdma_find_get_device(struct rdma_cm_id *cm_id) { struct nvmet_rdma_port *port = cm_id->context; struct nvmet_port *nport = port->nport; struct nvmet_rdma_device *ndev; int inline_page_count; int inline_sge_count; int ret; mutex_lock(&device_list_mutex); list_for_each_entry(ndev, &device_list, entry) { if (ndev->device->node_guid == cm_id->device->node_guid && kref_get_unless_zero(&ndev->ref)) goto out_unlock; } ndev = kzalloc(sizeof(*ndev), GFP_KERNEL); if (!ndev) goto out_err; inline_page_count = num_pages(nport->inline_data_size); inline_sge_count = max(cm_id->device->attrs.max_sge_rd, cm_id->device->attrs.max_recv_sge) - 1; if (inline_page_count > inline_sge_count) { pr_warn("inline_data_size %d cannot be supported by device %s. Reducing to %lu.\n", nport->inline_data_size, cm_id->device->name, inline_sge_count * PAGE_SIZE); nport->inline_data_size = inline_sge_count * PAGE_SIZE; inline_page_count = inline_sge_count; } ndev->inline_data_size = nport->inline_data_size; ndev->inline_page_count = inline_page_count; if (nport->pi_enable && !(cm_id->device->attrs.kernel_cap_flags & IBK_INTEGRITY_HANDOVER)) { pr_warn("T10-PI is not supported by device %s. Disabling it\n", cm_id->device->name); nport->pi_enable = false; } ndev->device = cm_id->device; kref_init(&ndev->ref); ndev->pd = ib_alloc_pd(ndev->device, 0); if (IS_ERR(ndev->pd)) goto out_free_dev; if (nvmet_rdma_use_srq) { ret = nvmet_rdma_init_srqs(ndev); if (ret) goto out_free_pd; } list_add(&ndev->entry, &device_list); out_unlock: mutex_unlock(&device_list_mutex); pr_debug("added %s.\n", ndev->device->name); return ndev; out_free_pd: ib_dealloc_pd(ndev->pd); out_free_dev: kfree(ndev); out_err: mutex_unlock(&device_list_mutex); return NULL; } static int nvmet_rdma_create_queue_ib(struct nvmet_rdma_queue *queue) { struct ib_qp_init_attr qp_attr = { }; struct nvmet_rdma_device *ndev = queue->dev; int nr_cqe, ret, i, factor; /* * Reserve CQ slots for RECV + RDMA_READ/RDMA_WRITE + RDMA_SEND. 
*/ nr_cqe = queue->recv_queue_size + 2 * queue->send_queue_size; queue->cq = ib_cq_pool_get(ndev->device, nr_cqe + 1, queue->comp_vector, IB_POLL_WORKQUEUE); if (IS_ERR(queue->cq)) { ret = PTR_ERR(queue->cq); pr_err("failed to create CQ cqe= %d ret= %d\n", nr_cqe + 1, ret); goto out; } qp_attr.qp_context = queue; qp_attr.event_handler = nvmet_rdma_qp_event; qp_attr.send_cq = queue->cq; qp_attr.recv_cq = queue->cq; qp_attr.sq_sig_type = IB_SIGNAL_REQ_WR; qp_attr.qp_type = IB_QPT_RC; /* +1 for drain */ qp_attr.cap.max_send_wr = queue->send_queue_size + 1; factor = rdma_rw_mr_factor(ndev->device, queue->cm_id->port_num, 1 << NVMET_RDMA_MAX_MDTS); qp_attr.cap.max_rdma_ctxs = queue->send_queue_size * factor; qp_attr.cap.max_send_sge = max(ndev->device->attrs.max_sge_rd, ndev->device->attrs.max_send_sge); if (queue->nsrq) { qp_attr.srq = queue->nsrq->srq; } else { /* +1 for drain */ qp_attr.cap.max_recv_wr = 1 + queue->recv_queue_size; qp_attr.cap.max_recv_sge = 1 + ndev->inline_page_count; } if (queue->port->pi_enable && queue->host_qid) qp_attr.create_flags |= IB_QP_CREATE_INTEGRITY_EN; ret = rdma_create_qp(queue->cm_id, ndev->pd, &qp_attr); if (ret) { pr_err("failed to create_qp ret= %d\n", ret); goto err_destroy_cq; } queue->qp = queue->cm_id->qp; atomic_set(&queue->sq_wr_avail, qp_attr.cap.max_send_wr); pr_debug("%s: max_cqe= %d max_sge= %d sq_size = %d cm_id= %p\n", __func__, queue->cq->cqe, qp_attr.cap.max_send_sge, qp_attr.cap.max_send_wr, queue->cm_id); if (!queue->nsrq) { for (i = 0; i < queue->recv_queue_size; i++) { queue->cmds[i].queue = queue; ret = nvmet_rdma_post_recv(ndev, &queue->cmds[i]); if (ret) goto err_destroy_qp; } } out: return ret; err_destroy_qp: rdma_destroy_qp(queue->cm_id); err_destroy_cq: ib_cq_pool_put(queue->cq, nr_cqe + 1); goto out; } static void nvmet_rdma_destroy_queue_ib(struct nvmet_rdma_queue *queue) { ib_drain_qp(queue->qp); if (queue->cm_id) rdma_destroy_id(queue->cm_id); ib_destroy_qp(queue->qp); ib_cq_pool_put(queue->cq, queue->recv_queue_size + 2 * queue->send_queue_size + 1); } static void nvmet_rdma_free_queue(struct nvmet_rdma_queue *queue) { pr_debug("freeing queue %d\n", queue->idx); nvmet_sq_destroy(&queue->nvme_sq); nvmet_rdma_destroy_queue_ib(queue); if (!queue->nsrq) { nvmet_rdma_free_cmds(queue->dev, queue->cmds, queue->recv_queue_size, !queue->host_qid); } nvmet_rdma_free_rsps(queue); ida_free(&nvmet_rdma_queue_ida, queue->idx); kfree(queue); } static void nvmet_rdma_release_queue_work(struct work_struct *w) { struct nvmet_rdma_queue *queue = container_of(w, struct nvmet_rdma_queue, release_work); struct nvmet_rdma_device *dev = queue->dev; nvmet_rdma_free_queue(queue); kref_put(&dev->ref, nvmet_rdma_free_dev); } static int nvmet_rdma_parse_cm_connect_req(struct rdma_conn_param *conn, struct nvmet_rdma_queue *queue) { struct nvme_rdma_cm_req *req; req = (struct nvme_rdma_cm_req *)conn->private_data; if (!req || conn->private_data_len == 0) return NVME_RDMA_CM_INVALID_LEN; if (le16_to_cpu(req->recfmt) != NVME_RDMA_CM_FMT_1_0) return NVME_RDMA_CM_INVALID_RECFMT; queue->host_qid = le16_to_cpu(req->qid); /* * req->hsqsize corresponds to our recv queue size plus 1 * req->hrqsize corresponds to our send queue size */ queue->recv_queue_size = le16_to_cpu(req->hsqsize) + 1; queue->send_queue_size = le16_to_cpu(req->hrqsize); if (!queue->host_qid && queue->recv_queue_size > NVME_AQ_DEPTH) return NVME_RDMA_CM_INVALID_HSQSIZE; /* XXX: Should we enforce some kind of max for IO queues? 
*/ return 0; } static int nvmet_rdma_cm_reject(struct rdma_cm_id *cm_id, enum nvme_rdma_cm_status status) { struct nvme_rdma_cm_rej rej; pr_debug("rejecting connect request: status %d (%s)\n", status, nvme_rdma_cm_msg(status)); rej.recfmt = cpu_to_le16(NVME_RDMA_CM_FMT_1_0); rej.sts = cpu_to_le16(status); return rdma_reject(cm_id, (void *)&rej, sizeof(rej), IB_CM_REJ_CONSUMER_DEFINED); } static struct nvmet_rdma_queue * nvmet_rdma_alloc_queue(struct nvmet_rdma_device *ndev, struct rdma_cm_id *cm_id, struct rdma_cm_event *event) { struct nvmet_rdma_port *port = cm_id->context; struct nvmet_rdma_queue *queue; int ret; queue = kzalloc(sizeof(*queue), GFP_KERNEL); if (!queue) { ret = NVME_RDMA_CM_NO_RSC; goto out_reject; } ret = nvmet_sq_init(&queue->nvme_sq); if (ret) { ret = NVME_RDMA_CM_NO_RSC; goto out_free_queue; } ret = nvmet_rdma_parse_cm_connect_req(&event->param.conn, queue); if (ret) goto out_destroy_sq; /* * Schedules the actual release because calling rdma_destroy_id from * inside a CM callback would trigger a deadlock. (great API design..) */ INIT_WORK(&queue->release_work, nvmet_rdma_release_queue_work); queue->dev = ndev; queue->cm_id = cm_id; queue->port = port->nport; spin_lock_init(&queue->state_lock); queue->state = NVMET_RDMA_Q_CONNECTING; INIT_LIST_HEAD(&queue->rsp_wait_list); INIT_LIST_HEAD(&queue->rsp_wr_wait_list); spin_lock_init(&queue->rsp_wr_wait_lock); INIT_LIST_HEAD(&queue->free_rsps); spin_lock_init(&queue->rsps_lock); INIT_LIST_HEAD(&queue->queue_list); queue->idx = ida_alloc(&nvmet_rdma_queue_ida, GFP_KERNEL); if (queue->idx < 0) { ret = NVME_RDMA_CM_NO_RSC; goto out_destroy_sq; } /* * Spread the io queues across completion vectors, * but still keep all admin queues on vector 0. */ queue->comp_vector = !queue->host_qid ? 
0 : queue->idx % ndev->device->num_comp_vectors; ret = nvmet_rdma_alloc_rsps(queue); if (ret) { ret = NVME_RDMA_CM_NO_RSC; goto out_ida_remove; } if (ndev->srqs) { queue->nsrq = ndev->srqs[queue->comp_vector % ndev->srq_count]; } else { queue->cmds = nvmet_rdma_alloc_cmds(ndev, queue->recv_queue_size, !queue->host_qid); if (IS_ERR(queue->cmds)) { ret = NVME_RDMA_CM_NO_RSC; goto out_free_responses; } } ret = nvmet_rdma_create_queue_ib(queue); if (ret) { pr_err("%s: creating RDMA queue failed (%d).\n", __func__, ret); ret = NVME_RDMA_CM_NO_RSC; goto out_free_cmds; } return queue; out_free_cmds: if (!queue->nsrq) { nvmet_rdma_free_cmds(queue->dev, queue->cmds, queue->recv_queue_size, !queue->host_qid); } out_free_responses: nvmet_rdma_free_rsps(queue); out_ida_remove: ida_free(&nvmet_rdma_queue_ida, queue->idx); out_destroy_sq: nvmet_sq_destroy(&queue->nvme_sq); out_free_queue: kfree(queue); out_reject: nvmet_rdma_cm_reject(cm_id, ret); return NULL; } static void nvmet_rdma_qp_event(struct ib_event *event, void *priv) { struct nvmet_rdma_queue *queue = priv; switch (event->event) { case IB_EVENT_COMM_EST: rdma_notify(queue->cm_id, event->event); break; case IB_EVENT_QP_LAST_WQE_REACHED: pr_debug("received last WQE reached event for queue=0x%p\n", queue); break; default: pr_err("received IB QP event: %s (%d)\n", ib_event_msg(event->event), event->event); break; } } static int nvmet_rdma_cm_accept(struct rdma_cm_id *cm_id, struct nvmet_rdma_queue *queue, struct rdma_conn_param *p) { struct rdma_conn_param param = { }; struct nvme_rdma_cm_rep priv = { }; int ret = -ENOMEM; param.rnr_retry_count = 7; param.flow_control = 1; param.initiator_depth = min_t(u8, p->initiator_depth, queue->dev->device->attrs.max_qp_init_rd_atom); param.private_data = &priv; param.private_data_len = sizeof(priv); priv.recfmt = cpu_to_le16(NVME_RDMA_CM_FMT_1_0); priv.crqsize = cpu_to_le16(queue->recv_queue_size); ret = rdma_accept(cm_id, &param); if (ret) pr_err("rdma_accept failed (error code = %d)\n", ret); return ret; } static int nvmet_rdma_queue_connect(struct rdma_cm_id *cm_id, struct rdma_cm_event *event) { struct nvmet_rdma_device *ndev; struct nvmet_rdma_queue *queue; int ret = -EINVAL; ndev = nvmet_rdma_find_get_device(cm_id); if (!ndev) { nvmet_rdma_cm_reject(cm_id, NVME_RDMA_CM_NO_RSC); return -ECONNREFUSED; } queue = nvmet_rdma_alloc_queue(ndev, cm_id, event); if (!queue) { ret = -ENOMEM; goto put_device; } if (queue->host_qid == 0) { /* Let inflight controller teardown complete */ flush_workqueue(nvmet_wq); } ret = nvmet_rdma_cm_accept(cm_id, queue, &event->param.conn); if (ret) { /* * Don't destroy the cm_id in free path, as we implicitly * destroy the cm_id here with non-zero ret code. 
*/ queue->cm_id = NULL; goto free_queue; } mutex_lock(&nvmet_rdma_queue_mutex); list_add_tail(&queue->queue_list, &nvmet_rdma_queue_list); mutex_unlock(&nvmet_rdma_queue_mutex); return 0; free_queue: nvmet_rdma_free_queue(queue); put_device: kref_put(&ndev->ref, nvmet_rdma_free_dev); return ret; } static void nvmet_rdma_queue_established(struct nvmet_rdma_queue *queue) { unsigned long flags; spin_lock_irqsave(&queue->state_lock, flags); if (queue->state != NVMET_RDMA_Q_CONNECTING) { pr_warn("trying to establish a connected queue\n"); goto out_unlock; } queue->state = NVMET_RDMA_Q_LIVE; while (!list_empty(&queue->rsp_wait_list)) { struct nvmet_rdma_rsp *cmd; cmd = list_first_entry(&queue->rsp_wait_list, struct nvmet_rdma_rsp, wait_list); list_del(&cmd->wait_list); spin_unlock_irqrestore(&queue->state_lock, flags); nvmet_rdma_handle_command(queue, cmd); spin_lock_irqsave(&queue->state_lock, flags); } out_unlock: spin_unlock_irqrestore(&queue->state_lock, flags); } static void __nvmet_rdma_queue_disconnect(struct nvmet_rdma_queue *queue) { bool disconnect = false; unsigned long flags; pr_debug("cm_id= %p queue->state= %d\n", queue->cm_id, queue->state); spin_lock_irqsave(&queue->state_lock, flags); switch (queue->state) { case NVMET_RDMA_Q_CONNECTING: while (!list_empty(&queue->rsp_wait_list)) { struct nvmet_rdma_rsp *rsp; rsp = list_first_entry(&queue->rsp_wait_list, struct nvmet_rdma_rsp, wait_list); list_del(&rsp->wait_list); nvmet_rdma_put_rsp(rsp); } fallthrough; case NVMET_RDMA_Q_LIVE: queue->state = NVMET_RDMA_Q_DISCONNECTING; disconnect = true; break; case NVMET_RDMA_Q_DISCONNECTING: break; } spin_unlock_irqrestore(&queue->state_lock, flags); if (disconnect) { rdma_disconnect(queue->cm_id); queue_work(nvmet_wq, &queue->release_work); } } static void nvmet_rdma_queue_disconnect(struct nvmet_rdma_queue *queue) { bool disconnect = false; mutex_lock(&nvmet_rdma_queue_mutex); if (!list_empty(&queue->queue_list)) { list_del_init(&queue->queue_list); disconnect = true; } mutex_unlock(&nvmet_rdma_queue_mutex); if (disconnect) __nvmet_rdma_queue_disconnect(queue); } static void nvmet_rdma_queue_connect_fail(struct rdma_cm_id *cm_id, struct nvmet_rdma_queue *queue) { WARN_ON_ONCE(queue->state != NVMET_RDMA_Q_CONNECTING); mutex_lock(&nvmet_rdma_queue_mutex); if (!list_empty(&queue->queue_list)) list_del_init(&queue->queue_list); mutex_unlock(&nvmet_rdma_queue_mutex); pr_err("failed to connect queue %d\n", queue->idx); queue_work(nvmet_wq, &queue->release_work); } /** * nvmet_rdma_device_removal() - Handle RDMA device removal * @cm_id: rdma_cm id, used for nvmet port * @queue: nvmet rdma queue (cm id qp_context) * * DEVICE_REMOVAL event notifies us that the RDMA device is about * to unplug. Note that this event can be generated on a normal * queue cm_id and/or a device bound listener cm_id (where in this * case queue will be null). * * We registered an ib_client to handle device removal for queues, * so we only need to handle the listening port cm_ids. In this case * we nullify the priv to prevent double cm_id destruction and destroying * the cm_id implicitely by returning a non-zero rc to the callout. */ static int nvmet_rdma_device_removal(struct rdma_cm_id *cm_id, struct nvmet_rdma_queue *queue) { struct nvmet_rdma_port *port; if (queue) { /* * This is a queue cm_id. we have registered * an ib_client to handle queues removal * so don't interfear and just return. */ return 0; } port = cm_id->context; /* * This is a listener cm_id. 
Make sure that * future remove_port won't invoke a double * cm_id destroy. use atomic xchg to make sure * we don't compete with remove_port. */ if (xchg(&port->cm_id, NULL) != cm_id) return 0; /* * We need to return 1 so that the core will destroy * it's own ID. What a great API design.. */ return 1; } static int nvmet_rdma_cm_handler(struct rdma_cm_id *cm_id, struct rdma_cm_event *event) { struct nvmet_rdma_queue *queue = NULL; int ret = 0; if (cm_id->qp) queue = cm_id->qp->qp_context; pr_debug("%s (%d): status %d id %p\n", rdma_event_msg(event->event), event->event, event->status, cm_id); switch (event->event) { case RDMA_CM_EVENT_CONNECT_REQUEST: ret = nvmet_rdma_queue_connect(cm_id, event); break; case RDMA_CM_EVENT_ESTABLISHED: nvmet_rdma_queue_established(queue); break; case RDMA_CM_EVENT_ADDR_CHANGE: if (!queue) { struct nvmet_rdma_port *port = cm_id->context; queue_delayed_work(nvmet_wq, &port->repair_work, 0); break; } fallthrough; case RDMA_CM_EVENT_DISCONNECTED: case RDMA_CM_EVENT_TIMEWAIT_EXIT: nvmet_rdma_queue_disconnect(queue); break; case RDMA_CM_EVENT_DEVICE_REMOVAL: ret = nvmet_rdma_device_removal(cm_id, queue); break; case RDMA_CM_EVENT_REJECTED: pr_debug("Connection rejected: %s\n", rdma_reject_msg(cm_id, event->status)); fallthrough; case RDMA_CM_EVENT_UNREACHABLE: case RDMA_CM_EVENT_CONNECT_ERROR: nvmet_rdma_queue_connect_fail(cm_id, queue); break; default: pr_err("received unrecognized RDMA CM event %d\n", event->event); break; } return ret; } static void nvmet_rdma_delete_ctrl(struct nvmet_ctrl *ctrl) { struct nvmet_rdma_queue *queue; restart: mutex_lock(&nvmet_rdma_queue_mutex); list_for_each_entry(queue, &nvmet_rdma_queue_list, queue_list) { if (queue->nvme_sq.ctrl == ctrl) { list_del_init(&queue->queue_list); mutex_unlock(&nvmet_rdma_queue_mutex); __nvmet_rdma_queue_disconnect(queue); goto restart; } } mutex_unlock(&nvmet_rdma_queue_mutex); } static void nvmet_rdma_destroy_port_queues(struct nvmet_rdma_port *port) { struct nvmet_rdma_queue *queue, *tmp; struct nvmet_port *nport = port->nport; mutex_lock(&nvmet_rdma_queue_mutex); list_for_each_entry_safe(queue, tmp, &nvmet_rdma_queue_list, queue_list) { if (queue->port != nport) continue; list_del_init(&queue->queue_list); __nvmet_rdma_queue_disconnect(queue); } mutex_unlock(&nvmet_rdma_queue_mutex); } static void nvmet_rdma_disable_port(struct nvmet_rdma_port *port) { struct rdma_cm_id *cm_id = xchg(&port->cm_id, NULL); if (cm_id) rdma_destroy_id(cm_id); /* * Destroy the remaining queues, which are not belong to any * controller yet. Do it here after the RDMA-CM was destroyed * guarantees that no new queue will be created. */ nvmet_rdma_destroy_port_queues(port); } static int nvmet_rdma_enable_port(struct nvmet_rdma_port *port) { struct sockaddr *addr = (struct sockaddr *)&port->addr; struct rdma_cm_id *cm_id; int ret; cm_id = rdma_create_id(&init_net, nvmet_rdma_cm_handler, port, RDMA_PS_TCP, IB_QPT_RC); if (IS_ERR(cm_id)) { pr_err("CM ID creation failed\n"); return PTR_ERR(cm_id); } /* * Allow both IPv4 and IPv6 sockets to bind a single port * at the same time. 
*/ ret = rdma_set_afonly(cm_id, 1); if (ret) { pr_err("rdma_set_afonly failed (%d)\n", ret); goto out_destroy_id; } ret = rdma_bind_addr(cm_id, addr); if (ret) { pr_err("binding CM ID to %pISpcs failed (%d)\n", addr, ret); goto out_destroy_id; } ret = rdma_listen(cm_id, 128); if (ret) { pr_err("listening to %pISpcs failed (%d)\n", addr, ret); goto out_destroy_id; } port->cm_id = cm_id; return 0; out_destroy_id: rdma_destroy_id(cm_id); return ret; } static void nvmet_rdma_repair_port_work(struct work_struct *w) { struct nvmet_rdma_port *port = container_of(to_delayed_work(w), struct nvmet_rdma_port, repair_work); int ret; nvmet_rdma_disable_port(port); ret = nvmet_rdma_enable_port(port); if (ret) queue_delayed_work(nvmet_wq, &port->repair_work, 5 * HZ); } static int nvmet_rdma_add_port(struct nvmet_port *nport) { struct nvmet_rdma_port *port; __kernel_sa_family_t af; int ret; port = kzalloc(sizeof(*port), GFP_KERNEL); if (!port) return -ENOMEM; nport->priv = port; port->nport = nport; INIT_DELAYED_WORK(&port->repair_work, nvmet_rdma_repair_port_work); switch (nport->disc_addr.adrfam) { case NVMF_ADDR_FAMILY_IP4: af = AF_INET; break; case NVMF_ADDR_FAMILY_IP6: af = AF_INET6; break; default: pr_err("address family %d not supported\n", nport->disc_addr.adrfam); ret = -EINVAL; goto out_free_port; } if (nport->inline_data_size < 0) { nport->inline_data_size = NVMET_RDMA_DEFAULT_INLINE_DATA_SIZE; } else if (nport->inline_data_size > NVMET_RDMA_MAX_INLINE_DATA_SIZE) { pr_warn("inline_data_size %u is too large, reducing to %u\n", nport->inline_data_size, NVMET_RDMA_MAX_INLINE_DATA_SIZE); nport->inline_data_size = NVMET_RDMA_MAX_INLINE_DATA_SIZE; } ret = inet_pton_with_scope(&init_net, af, nport->disc_addr.traddr, nport->disc_addr.trsvcid, &port->addr); if (ret) { pr_err("malformed ip/port passed: %s:%s\n", nport->disc_addr.traddr, nport->disc_addr.trsvcid); goto out_free_port; } ret = nvmet_rdma_enable_port(port); if (ret) goto out_free_port; pr_info("enabling port %d (%pISpcs)\n", le16_to_cpu(nport->disc_addr.portid), (struct sockaddr *)&port->addr); return 0; out_free_port: kfree(port); return ret; } static void nvmet_rdma_remove_port(struct nvmet_port *nport) { struct nvmet_rdma_port *port = nport->priv; cancel_delayed_work_sync(&port->repair_work); nvmet_rdma_disable_port(port); kfree(port); } static void nvmet_rdma_disc_port_addr(struct nvmet_req *req, struct nvmet_port *nport, char *traddr) { struct nvmet_rdma_port *port = nport->priv; struct rdma_cm_id *cm_id = port->cm_id; if (inet_addr_is_any((struct sockaddr *)&cm_id->route.addr.src_addr)) { struct nvmet_rdma_rsp *rsp = container_of(req, struct nvmet_rdma_rsp, req); struct rdma_cm_id *req_cm_id = rsp->queue->cm_id; struct sockaddr *addr = (void *)&req_cm_id->route.addr.src_addr; sprintf(traddr, "%pISc", addr); } else { memcpy(traddr, nport->disc_addr.traddr, NVMF_TRADDR_SIZE); } } static u8 nvmet_rdma_get_mdts(const struct nvmet_ctrl *ctrl) { if (ctrl->pi_support) return NVMET_RDMA_MAX_METADATA_MDTS; return NVMET_RDMA_MAX_MDTS; } static u16 nvmet_rdma_get_max_queue_size(const struct nvmet_ctrl *ctrl) { return NVME_RDMA_MAX_QUEUE_SIZE; } static const struct nvmet_fabrics_ops nvmet_rdma_ops = { .owner = THIS_MODULE, .type = NVMF_TRTYPE_RDMA, .msdbd = 1, .flags = NVMF_KEYED_SGLS | NVMF_METADATA_SUPPORTED, .add_port = nvmet_rdma_add_port, .remove_port = nvmet_rdma_remove_port, .queue_response = nvmet_rdma_queue_response, .delete_ctrl = nvmet_rdma_delete_ctrl, .disc_traddr = nvmet_rdma_disc_port_addr, .get_mdts = nvmet_rdma_get_mdts, 
.get_max_queue_size = nvmet_rdma_get_max_queue_size, }; static void nvmet_rdma_remove_one(struct ib_device *ib_device, void *client_data) { struct nvmet_rdma_queue *queue, *tmp; struct nvmet_rdma_device *ndev; bool found = false; mutex_lock(&device_list_mutex); list_for_each_entry(ndev, &device_list, entry) { if (ndev->device == ib_device) { found = true; break; } } mutex_unlock(&device_list_mutex); if (!found) return; /* * IB Device that is used by nvmet controllers is being removed, * delete all queues using this device. */ mutex_lock(&nvmet_rdma_queue_mutex); list_for_each_entry_safe(queue, tmp, &nvmet_rdma_queue_list, queue_list) { if (queue->dev->device != ib_device) continue; pr_info("Removing queue %d\n", queue->idx); list_del_init(&queue->queue_list); __nvmet_rdma_queue_disconnect(queue); } mutex_unlock(&nvmet_rdma_queue_mutex); flush_workqueue(nvmet_wq); } static struct ib_client nvmet_rdma_ib_client = { .name = "nvmet_rdma", .remove = nvmet_rdma_remove_one }; static int __init nvmet_rdma_init(void) { int ret; ret = ib_register_client(&nvmet_rdma_ib_client); if (ret) return ret; ret = nvmet_register_transport(&nvmet_rdma_ops); if (ret) goto err_ib_client; return 0; err_ib_client: ib_unregister_client(&nvmet_rdma_ib_client); return ret; } static void __exit nvmet_rdma_exit(void) { nvmet_unregister_transport(&nvmet_rdma_ops); ib_unregister_client(&nvmet_rdma_ib_client); WARN_ON_ONCE(!list_empty(&nvmet_rdma_queue_list)); ida_destroy(&nvmet_rdma_queue_ida); } module_init(nvmet_rdma_init); module_exit(nvmet_rdma_exit); MODULE_LICENSE("GPL v2"); MODULE_ALIAS("nvmet-transport-1"); /* 1 == NVMF_TRTYPE_RDMA */
linux-master
drivers/nvme/target/rdma.c
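The connect-request parsing in nvmet_rdma_parse_cm_connect_req() above applies a small convention worth spelling out: the host-supplied HSQSIZE maps to the target's receive queue size plus one (every host submission-queue entry may arrive as a command capsule), while HRQSIZE maps to the target's send queue size. The standalone sketch below mirrors that rule; the struct and helper names are illustrative stand-ins, not the kernel's definitions, and wire endianness handling is omitted.

#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-in for the queue-size fields carried in the RDMA CM
 * connect-request private data (the real struct nvme_rdma_cm_req is
 * little-endian on the wire; conversion is omitted here). */
struct cm_connect_req {
	uint16_t qid;      /* 0 = admin queue, >0 = I/O queue */
	uint16_t hsqsize;  /* host submission queue size */
	uint16_t hrqsize;  /* host receive queue size */
};

/* Same mapping as the target code above: the receive side must cover every
 * host SQ entry plus one, the send side only has to match the host's RQ. */
static void size_target_queues(const struct cm_connect_req *req,
			       unsigned int *recv_size,
			       unsigned int *send_size)
{
	*recv_size = (unsigned int)req->hsqsize + 1;
	*send_size = req->hrqsize;
}

int main(void)
{
	struct cm_connect_req req = { .qid = 1, .hsqsize = 127, .hrqsize = 128 };
	unsigned int recv_size, send_size;

	size_target_queues(&req, &recv_size, &send_size);
	printf("recv_queue_size=%u send_queue_size=%u\n", recv_size, send_size);
	return 0;
}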
// SPDX-License-Identifier: GPL-2.0 /* * NVMe over Fabrics loopback device. * Copyright (c) 2015-2016 HGST, a Western Digital Company. */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/scatterlist.h> #include <linux/blk-mq.h> #include <linux/nvme.h> #include <linux/module.h> #include <linux/parser.h> #include "nvmet.h" #include "../host/nvme.h" #include "../host/fabrics.h" #define NVME_LOOP_MAX_SEGMENTS 256 struct nvme_loop_iod { struct nvme_request nvme_req; struct nvme_command cmd; struct nvme_completion cqe; struct nvmet_req req; struct nvme_loop_queue *queue; struct work_struct work; struct sg_table sg_table; struct scatterlist first_sgl[]; }; struct nvme_loop_ctrl { struct nvme_loop_queue *queues; struct blk_mq_tag_set admin_tag_set; struct list_head list; struct blk_mq_tag_set tag_set; struct nvme_loop_iod async_event_iod; struct nvme_ctrl ctrl; struct nvmet_port *port; }; static inline struct nvme_loop_ctrl *to_loop_ctrl(struct nvme_ctrl *ctrl) { return container_of(ctrl, struct nvme_loop_ctrl, ctrl); } enum nvme_loop_queue_flags { NVME_LOOP_Q_LIVE = 0, }; struct nvme_loop_queue { struct nvmet_cq nvme_cq; struct nvmet_sq nvme_sq; struct nvme_loop_ctrl *ctrl; unsigned long flags; }; static LIST_HEAD(nvme_loop_ports); static DEFINE_MUTEX(nvme_loop_ports_mutex); static LIST_HEAD(nvme_loop_ctrl_list); static DEFINE_MUTEX(nvme_loop_ctrl_mutex); static void nvme_loop_queue_response(struct nvmet_req *nvme_req); static void nvme_loop_delete_ctrl(struct nvmet_ctrl *ctrl); static const struct nvmet_fabrics_ops nvme_loop_ops; static inline int nvme_loop_queue_idx(struct nvme_loop_queue *queue) { return queue - queue->ctrl->queues; } static void nvme_loop_complete_rq(struct request *req) { struct nvme_loop_iod *iod = blk_mq_rq_to_pdu(req); sg_free_table_chained(&iod->sg_table, NVME_INLINE_SG_CNT); nvme_complete_rq(req); } static struct blk_mq_tags *nvme_loop_tagset(struct nvme_loop_queue *queue) { u32 queue_idx = nvme_loop_queue_idx(queue); if (queue_idx == 0) return queue->ctrl->admin_tag_set.tags[queue_idx]; return queue->ctrl->tag_set.tags[queue_idx - 1]; } static void nvme_loop_queue_response(struct nvmet_req *req) { struct nvme_loop_queue *queue = container_of(req->sq, struct nvme_loop_queue, nvme_sq); struct nvme_completion *cqe = req->cqe; /* * AEN requests are special as they don't time out and can * survive any kind of queue freeze and often don't respond to * aborts. We don't even bother to allocate a struct request * for them but rather special case them here. 
*/ if (unlikely(nvme_is_aen_req(nvme_loop_queue_idx(queue), cqe->command_id))) { nvme_complete_async_event(&queue->ctrl->ctrl, cqe->status, &cqe->result); } else { struct request *rq; rq = nvme_find_rq(nvme_loop_tagset(queue), cqe->command_id); if (!rq) { dev_err(queue->ctrl->ctrl.device, "got bad command_id %#x on queue %d\n", cqe->command_id, nvme_loop_queue_idx(queue)); return; } if (!nvme_try_complete_req(rq, cqe->status, cqe->result)) nvme_loop_complete_rq(rq); } } static void nvme_loop_execute_work(struct work_struct *work) { struct nvme_loop_iod *iod = container_of(work, struct nvme_loop_iod, work); iod->req.execute(&iod->req); } static blk_status_t nvme_loop_queue_rq(struct blk_mq_hw_ctx *hctx, const struct blk_mq_queue_data *bd) { struct nvme_ns *ns = hctx->queue->queuedata; struct nvme_loop_queue *queue = hctx->driver_data; struct request *req = bd->rq; struct nvme_loop_iod *iod = blk_mq_rq_to_pdu(req); bool queue_ready = test_bit(NVME_LOOP_Q_LIVE, &queue->flags); blk_status_t ret; if (!nvme_check_ready(&queue->ctrl->ctrl, req, queue_ready)) return nvme_fail_nonready_command(&queue->ctrl->ctrl, req); ret = nvme_setup_cmd(ns, req); if (ret) return ret; nvme_start_request(req); iod->cmd.common.flags |= NVME_CMD_SGL_METABUF; iod->req.port = queue->ctrl->port; if (!nvmet_req_init(&iod->req, &queue->nvme_cq, &queue->nvme_sq, &nvme_loop_ops)) return BLK_STS_OK; if (blk_rq_nr_phys_segments(req)) { iod->sg_table.sgl = iod->first_sgl; if (sg_alloc_table_chained(&iod->sg_table, blk_rq_nr_phys_segments(req), iod->sg_table.sgl, NVME_INLINE_SG_CNT)) { nvme_cleanup_cmd(req); return BLK_STS_RESOURCE; } iod->req.sg = iod->sg_table.sgl; iod->req.sg_cnt = blk_rq_map_sg(req->q, req, iod->sg_table.sgl); iod->req.transfer_len = blk_rq_payload_bytes(req); } queue_work(nvmet_wq, &iod->work); return BLK_STS_OK; } static void nvme_loop_submit_async_event(struct nvme_ctrl *arg) { struct nvme_loop_ctrl *ctrl = to_loop_ctrl(arg); struct nvme_loop_queue *queue = &ctrl->queues[0]; struct nvme_loop_iod *iod = &ctrl->async_event_iod; memset(&iod->cmd, 0, sizeof(iod->cmd)); iod->cmd.common.opcode = nvme_admin_async_event; iod->cmd.common.command_id = NVME_AQ_BLK_MQ_DEPTH; iod->cmd.common.flags |= NVME_CMD_SGL_METABUF; if (!nvmet_req_init(&iod->req, &queue->nvme_cq, &queue->nvme_sq, &nvme_loop_ops)) { dev_err(ctrl->ctrl.device, "failed async event work\n"); return; } queue_work(nvmet_wq, &iod->work); } static int nvme_loop_init_iod(struct nvme_loop_ctrl *ctrl, struct nvme_loop_iod *iod, unsigned int queue_idx) { iod->req.cmd = &iod->cmd; iod->req.cqe = &iod->cqe; iod->queue = &ctrl->queues[queue_idx]; INIT_WORK(&iod->work, nvme_loop_execute_work); return 0; } static int nvme_loop_init_request(struct blk_mq_tag_set *set, struct request *req, unsigned int hctx_idx, unsigned int numa_node) { struct nvme_loop_ctrl *ctrl = to_loop_ctrl(set->driver_data); struct nvme_loop_iod *iod = blk_mq_rq_to_pdu(req); nvme_req(req)->ctrl = &ctrl->ctrl; nvme_req(req)->cmd = &iod->cmd; return nvme_loop_init_iod(ctrl, blk_mq_rq_to_pdu(req), (set == &ctrl->tag_set) ? 
hctx_idx + 1 : 0); } static struct lock_class_key loop_hctx_fq_lock_key; static int nvme_loop_init_hctx(struct blk_mq_hw_ctx *hctx, void *data, unsigned int hctx_idx) { struct nvme_loop_ctrl *ctrl = to_loop_ctrl(data); struct nvme_loop_queue *queue = &ctrl->queues[hctx_idx + 1]; BUG_ON(hctx_idx >= ctrl->ctrl.queue_count); /* * flush_end_io() can be called recursively for us, so use our own * lock class key for avoiding lockdep possible recursive locking, * then we can remove the dynamically allocated lock class for each * flush queue, that way may cause horrible boot delay. */ blk_mq_hctx_set_fq_lock_class(hctx, &loop_hctx_fq_lock_key); hctx->driver_data = queue; return 0; } static int nvme_loop_init_admin_hctx(struct blk_mq_hw_ctx *hctx, void *data, unsigned int hctx_idx) { struct nvme_loop_ctrl *ctrl = to_loop_ctrl(data); struct nvme_loop_queue *queue = &ctrl->queues[0]; BUG_ON(hctx_idx != 0); hctx->driver_data = queue; return 0; } static const struct blk_mq_ops nvme_loop_mq_ops = { .queue_rq = nvme_loop_queue_rq, .complete = nvme_loop_complete_rq, .init_request = nvme_loop_init_request, .init_hctx = nvme_loop_init_hctx, }; static const struct blk_mq_ops nvme_loop_admin_mq_ops = { .queue_rq = nvme_loop_queue_rq, .complete = nvme_loop_complete_rq, .init_request = nvme_loop_init_request, .init_hctx = nvme_loop_init_admin_hctx, }; static void nvme_loop_destroy_admin_queue(struct nvme_loop_ctrl *ctrl) { if (!test_and_clear_bit(NVME_LOOP_Q_LIVE, &ctrl->queues[0].flags)) return; nvmet_sq_destroy(&ctrl->queues[0].nvme_sq); nvme_remove_admin_tag_set(&ctrl->ctrl); } static void nvme_loop_free_ctrl(struct nvme_ctrl *nctrl) { struct nvme_loop_ctrl *ctrl = to_loop_ctrl(nctrl); if (list_empty(&ctrl->list)) goto free_ctrl; mutex_lock(&nvme_loop_ctrl_mutex); list_del(&ctrl->list); mutex_unlock(&nvme_loop_ctrl_mutex); if (nctrl->tagset) nvme_remove_io_tag_set(nctrl); kfree(ctrl->queues); nvmf_free_options(nctrl->opts); free_ctrl: kfree(ctrl); } static void nvme_loop_destroy_io_queues(struct nvme_loop_ctrl *ctrl) { int i; for (i = 1; i < ctrl->ctrl.queue_count; i++) { clear_bit(NVME_LOOP_Q_LIVE, &ctrl->queues[i].flags); nvmet_sq_destroy(&ctrl->queues[i].nvme_sq); } ctrl->ctrl.queue_count = 1; } static int nvme_loop_init_io_queues(struct nvme_loop_ctrl *ctrl) { struct nvmf_ctrl_options *opts = ctrl->ctrl.opts; unsigned int nr_io_queues; int ret, i; nr_io_queues = min(opts->nr_io_queues, num_online_cpus()); ret = nvme_set_queue_count(&ctrl->ctrl, &nr_io_queues); if (ret || !nr_io_queues) return ret; dev_info(ctrl->ctrl.device, "creating %d I/O queues.\n", nr_io_queues); for (i = 1; i <= nr_io_queues; i++) { ctrl->queues[i].ctrl = ctrl; ret = nvmet_sq_init(&ctrl->queues[i].nvme_sq); if (ret) goto out_destroy_queues; ctrl->ctrl.queue_count++; } return 0; out_destroy_queues: nvme_loop_destroy_io_queues(ctrl); return ret; } static int nvme_loop_connect_io_queues(struct nvme_loop_ctrl *ctrl) { int i, ret; for (i = 1; i < ctrl->ctrl.queue_count; i++) { ret = nvmf_connect_io_queue(&ctrl->ctrl, i); if (ret) return ret; set_bit(NVME_LOOP_Q_LIVE, &ctrl->queues[i].flags); } return 0; } static int nvme_loop_configure_admin_queue(struct nvme_loop_ctrl *ctrl) { int error; ctrl->queues[0].ctrl = ctrl; error = nvmet_sq_init(&ctrl->queues[0].nvme_sq); if (error) return error; ctrl->ctrl.queue_count = 1; error = nvme_alloc_admin_tag_set(&ctrl->ctrl, &ctrl->admin_tag_set, &nvme_loop_admin_mq_ops, sizeof(struct nvme_loop_iod) + NVME_INLINE_SG_CNT * sizeof(struct scatterlist)); if (error) goto out_free_sq; /* reset stopped 
state for the fresh admin queue */ clear_bit(NVME_CTRL_ADMIN_Q_STOPPED, &ctrl->ctrl.flags); error = nvmf_connect_admin_queue(&ctrl->ctrl); if (error) goto out_cleanup_tagset; set_bit(NVME_LOOP_Q_LIVE, &ctrl->queues[0].flags); error = nvme_enable_ctrl(&ctrl->ctrl); if (error) goto out_cleanup_tagset; ctrl->ctrl.max_hw_sectors = (NVME_LOOP_MAX_SEGMENTS - 1) << PAGE_SECTORS_SHIFT; nvme_unquiesce_admin_queue(&ctrl->ctrl); error = nvme_init_ctrl_finish(&ctrl->ctrl, false); if (error) goto out_cleanup_tagset; return 0; out_cleanup_tagset: clear_bit(NVME_LOOP_Q_LIVE, &ctrl->queues[0].flags); nvme_remove_admin_tag_set(&ctrl->ctrl); out_free_sq: nvmet_sq_destroy(&ctrl->queues[0].nvme_sq); return error; } static void nvme_loop_shutdown_ctrl(struct nvme_loop_ctrl *ctrl) { if (ctrl->ctrl.queue_count > 1) { nvme_quiesce_io_queues(&ctrl->ctrl); nvme_cancel_tagset(&ctrl->ctrl); nvme_loop_destroy_io_queues(ctrl); } nvme_quiesce_admin_queue(&ctrl->ctrl); if (ctrl->ctrl.state == NVME_CTRL_LIVE) nvme_disable_ctrl(&ctrl->ctrl, true); nvme_cancel_admin_tagset(&ctrl->ctrl); nvme_loop_destroy_admin_queue(ctrl); } static void nvme_loop_delete_ctrl_host(struct nvme_ctrl *ctrl) { nvme_loop_shutdown_ctrl(to_loop_ctrl(ctrl)); } static void nvme_loop_delete_ctrl(struct nvmet_ctrl *nctrl) { struct nvme_loop_ctrl *ctrl; mutex_lock(&nvme_loop_ctrl_mutex); list_for_each_entry(ctrl, &nvme_loop_ctrl_list, list) { if (ctrl->ctrl.cntlid == nctrl->cntlid) nvme_delete_ctrl(&ctrl->ctrl); } mutex_unlock(&nvme_loop_ctrl_mutex); } static void nvme_loop_reset_ctrl_work(struct work_struct *work) { struct nvme_loop_ctrl *ctrl = container_of(work, struct nvme_loop_ctrl, ctrl.reset_work); int ret; nvme_stop_ctrl(&ctrl->ctrl); nvme_loop_shutdown_ctrl(ctrl); if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_CONNECTING)) { if (ctrl->ctrl.state != NVME_CTRL_DELETING && ctrl->ctrl.state != NVME_CTRL_DELETING_NOIO) /* state change failure for non-deleted ctrl? 
*/ WARN_ON_ONCE(1); return; } ret = nvme_loop_configure_admin_queue(ctrl); if (ret) goto out_disable; ret = nvme_loop_init_io_queues(ctrl); if (ret) goto out_destroy_admin; ret = nvme_loop_connect_io_queues(ctrl); if (ret) goto out_destroy_io; blk_mq_update_nr_hw_queues(&ctrl->tag_set, ctrl->ctrl.queue_count - 1); if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_LIVE)) WARN_ON_ONCE(1); nvme_start_ctrl(&ctrl->ctrl); return; out_destroy_io: nvme_loop_destroy_io_queues(ctrl); out_destroy_admin: nvme_loop_destroy_admin_queue(ctrl); out_disable: dev_warn(ctrl->ctrl.device, "Removing after reset failure\n"); nvme_uninit_ctrl(&ctrl->ctrl); } static const struct nvme_ctrl_ops nvme_loop_ctrl_ops = { .name = "loop", .module = THIS_MODULE, .flags = NVME_F_FABRICS, .reg_read32 = nvmf_reg_read32, .reg_read64 = nvmf_reg_read64, .reg_write32 = nvmf_reg_write32, .free_ctrl = nvme_loop_free_ctrl, .submit_async_event = nvme_loop_submit_async_event, .delete_ctrl = nvme_loop_delete_ctrl_host, .get_address = nvmf_get_address, }; static int nvme_loop_create_io_queues(struct nvme_loop_ctrl *ctrl) { int ret; ret = nvme_loop_init_io_queues(ctrl); if (ret) return ret; ret = nvme_alloc_io_tag_set(&ctrl->ctrl, &ctrl->tag_set, &nvme_loop_mq_ops, 1, sizeof(struct nvme_loop_iod) + NVME_INLINE_SG_CNT * sizeof(struct scatterlist)); if (ret) goto out_destroy_queues; ret = nvme_loop_connect_io_queues(ctrl); if (ret) goto out_cleanup_tagset; return 0; out_cleanup_tagset: nvme_remove_io_tag_set(&ctrl->ctrl); out_destroy_queues: nvme_loop_destroy_io_queues(ctrl); return ret; } static struct nvmet_port *nvme_loop_find_port(struct nvme_ctrl *ctrl) { struct nvmet_port *p, *found = NULL; mutex_lock(&nvme_loop_ports_mutex); list_for_each_entry(p, &nvme_loop_ports, entry) { /* if no transport address is specified use the first port */ if ((ctrl->opts->mask & NVMF_OPT_TRADDR) && strcmp(ctrl->opts->traddr, p->disc_addr.traddr)) continue; found = p; break; } mutex_unlock(&nvme_loop_ports_mutex); return found; } static struct nvme_ctrl *nvme_loop_create_ctrl(struct device *dev, struct nvmf_ctrl_options *opts) { struct nvme_loop_ctrl *ctrl; int ret; ctrl = kzalloc(sizeof(*ctrl), GFP_KERNEL); if (!ctrl) return ERR_PTR(-ENOMEM); ctrl->ctrl.opts = opts; INIT_LIST_HEAD(&ctrl->list); INIT_WORK(&ctrl->ctrl.reset_work, nvme_loop_reset_ctrl_work); ret = nvme_init_ctrl(&ctrl->ctrl, dev, &nvme_loop_ctrl_ops, 0 /* no quirks, we're perfect! 
*/); if (ret) { kfree(ctrl); goto out; } if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_CONNECTING)) WARN_ON_ONCE(1); ret = -ENOMEM; ctrl->ctrl.kato = opts->kato; ctrl->port = nvme_loop_find_port(&ctrl->ctrl); ctrl->queues = kcalloc(opts->nr_io_queues + 1, sizeof(*ctrl->queues), GFP_KERNEL); if (!ctrl->queues) goto out_uninit_ctrl; ret = nvme_loop_configure_admin_queue(ctrl); if (ret) goto out_free_queues; if (opts->queue_size > ctrl->ctrl.maxcmd) { /* warn if maxcmd is lower than queue_size */ dev_warn(ctrl->ctrl.device, "queue_size %zu > ctrl maxcmd %u, clamping down\n", opts->queue_size, ctrl->ctrl.maxcmd); opts->queue_size = ctrl->ctrl.maxcmd; } ctrl->ctrl.sqsize = opts->queue_size - 1; if (opts->nr_io_queues) { ret = nvme_loop_create_io_queues(ctrl); if (ret) goto out_remove_admin_queue; } nvme_loop_init_iod(ctrl, &ctrl->async_event_iod, 0); dev_info(ctrl->ctrl.device, "new ctrl: \"%s\"\n", ctrl->ctrl.opts->subsysnqn); if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_LIVE)) WARN_ON_ONCE(1); mutex_lock(&nvme_loop_ctrl_mutex); list_add_tail(&ctrl->list, &nvme_loop_ctrl_list); mutex_unlock(&nvme_loop_ctrl_mutex); nvme_start_ctrl(&ctrl->ctrl); return &ctrl->ctrl; out_remove_admin_queue: nvme_loop_destroy_admin_queue(ctrl); out_free_queues: kfree(ctrl->queues); out_uninit_ctrl: nvme_uninit_ctrl(&ctrl->ctrl); nvme_put_ctrl(&ctrl->ctrl); out: if (ret > 0) ret = -EIO; return ERR_PTR(ret); } static int nvme_loop_add_port(struct nvmet_port *port) { mutex_lock(&nvme_loop_ports_mutex); list_add_tail(&port->entry, &nvme_loop_ports); mutex_unlock(&nvme_loop_ports_mutex); return 0; } static void nvme_loop_remove_port(struct nvmet_port *port) { mutex_lock(&nvme_loop_ports_mutex); list_del_init(&port->entry); mutex_unlock(&nvme_loop_ports_mutex); /* * Ensure any ctrls that are in the process of being * deleted are in fact deleted before we return * and free the port. This is to prevent active * ctrls from using a port after it's freed. */ flush_workqueue(nvme_delete_wq); } static const struct nvmet_fabrics_ops nvme_loop_ops = { .owner = THIS_MODULE, .type = NVMF_TRTYPE_LOOP, .add_port = nvme_loop_add_port, .remove_port = nvme_loop_remove_port, .queue_response = nvme_loop_queue_response, .delete_ctrl = nvme_loop_delete_ctrl, }; static struct nvmf_transport_ops nvme_loop_transport = { .name = "loop", .module = THIS_MODULE, .create_ctrl = nvme_loop_create_ctrl, .allowed_opts = NVMF_OPT_TRADDR, }; static int __init nvme_loop_init_module(void) { int ret; ret = nvmet_register_transport(&nvme_loop_ops); if (ret) return ret; ret = nvmf_register_transport(&nvme_loop_transport); if (ret) nvmet_unregister_transport(&nvme_loop_ops); return ret; } static void __exit nvme_loop_cleanup_module(void) { struct nvme_loop_ctrl *ctrl, *next; nvmf_unregister_transport(&nvme_loop_transport); nvmet_unregister_transport(&nvme_loop_ops); mutex_lock(&nvme_loop_ctrl_mutex); list_for_each_entry_safe(ctrl, next, &nvme_loop_ctrl_list, list) nvme_delete_ctrl(&ctrl->ctrl); mutex_unlock(&nvme_loop_ctrl_mutex); flush_workqueue(nvme_delete_wq); } module_init(nvme_loop_init_module); module_exit(nvme_loop_cleanup_module); MODULE_LICENSE("GPL v2"); MODULE_ALIAS("nvmet-transport-254"); /* 254 == NVMF_TRTYPE_LOOP */
linux-master
drivers/nvme/target/loop.c
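nvme_loop_queue_idx() and nvme_loop_tagset() in the loopback driver above depend on one layout detail: the queues array keeps the admin queue at index 0 and I/O queues from index 1, while the I/O tag set only covers the I/O queues, so its index is shifted down by one. A minimal sketch of that indexing rule follows; the types and field names here are illustrative, not the driver's own.

#include <assert.h>
#include <stddef.h>

struct loop_queue { int unused; };

struct loop_ctrl {
	struct loop_queue queues[1 + 4];  /* [0] = admin, [1..4] = I/O */
	const char *admin_tags;           /* stand-in for the admin tag set */
	const char *io_tags[4];           /* stand-in for the I/O tag sets */
};

/* Queue index recovered by pointer arithmetic, as nvme_loop_queue_idx() does. */
static size_t queue_idx(const struct loop_ctrl *ctrl, const struct loop_queue *q)
{
	return (size_t)(q - ctrl->queues);
}

/* Index 0 resolves to the admin tags; I/O queue i resolves to io_tags[i - 1]. */
static const char *queue_tags(const struct loop_ctrl *ctrl, const struct loop_queue *q)
{
	size_t idx = queue_idx(ctrl, q);

	return idx == 0 ? ctrl->admin_tags : ctrl->io_tags[idx - 1];
}

int main(void)
{
	struct loop_ctrl ctrl = {
		.admin_tags = "admin",
		.io_tags = { "io1", "io2", "io3", "io4" },
	};

	assert(queue_tags(&ctrl, &ctrl.queues[0]) == ctrl.admin_tags);
	assert(queue_tags(&ctrl, &ctrl.queues[3]) == ctrl.io_tags[2]);
	return 0;
}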
// SPDX-License-Identifier: GPL-2.0 /* * NVMe admin command implementation. * Copyright (c) 2015-2016 HGST, a Western Digital Company. */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/module.h> #include <linux/rculist.h> #include <linux/part_stat.h> #include <generated/utsrelease.h> #include <asm/unaligned.h> #include "nvmet.h" u32 nvmet_get_log_page_len(struct nvme_command *cmd) { u32 len = le16_to_cpu(cmd->get_log_page.numdu); len <<= 16; len += le16_to_cpu(cmd->get_log_page.numdl); /* NUMD is a 0's based value */ len += 1; len *= sizeof(u32); return len; } static u32 nvmet_feat_data_len(struct nvmet_req *req, u32 cdw10) { switch (cdw10 & 0xff) { case NVME_FEAT_HOST_ID: return sizeof(req->sq->ctrl->hostid); default: return 0; } } u64 nvmet_get_log_page_offset(struct nvme_command *cmd) { return le64_to_cpu(cmd->get_log_page.lpo); } static void nvmet_execute_get_log_page_noop(struct nvmet_req *req) { nvmet_req_complete(req, nvmet_zero_sgl(req, 0, req->transfer_len)); } static void nvmet_execute_get_log_page_error(struct nvmet_req *req) { struct nvmet_ctrl *ctrl = req->sq->ctrl; unsigned long flags; off_t offset = 0; u64 slot; u64 i; spin_lock_irqsave(&ctrl->error_lock, flags); slot = ctrl->err_counter % NVMET_ERROR_LOG_SLOTS; for (i = 0; i < NVMET_ERROR_LOG_SLOTS; i++) { if (nvmet_copy_to_sgl(req, offset, &ctrl->slots[slot], sizeof(struct nvme_error_slot))) break; if (slot == 0) slot = NVMET_ERROR_LOG_SLOTS - 1; else slot--; offset += sizeof(struct nvme_error_slot); } spin_unlock_irqrestore(&ctrl->error_lock, flags); nvmet_req_complete(req, 0); } static u16 nvmet_get_smart_log_nsid(struct nvmet_req *req, struct nvme_smart_log *slog) { u64 host_reads, host_writes, data_units_read, data_units_written; u16 status; status = nvmet_req_find_ns(req); if (status) return status; /* we don't have the right data for file backed ns */ if (!req->ns->bdev) return NVME_SC_SUCCESS; host_reads = part_stat_read(req->ns->bdev, ios[READ]); data_units_read = DIV_ROUND_UP(part_stat_read(req->ns->bdev, sectors[READ]), 1000); host_writes = part_stat_read(req->ns->bdev, ios[WRITE]); data_units_written = DIV_ROUND_UP(part_stat_read(req->ns->bdev, sectors[WRITE]), 1000); put_unaligned_le64(host_reads, &slog->host_reads[0]); put_unaligned_le64(data_units_read, &slog->data_units_read[0]); put_unaligned_le64(host_writes, &slog->host_writes[0]); put_unaligned_le64(data_units_written, &slog->data_units_written[0]); return NVME_SC_SUCCESS; } static u16 nvmet_get_smart_log_all(struct nvmet_req *req, struct nvme_smart_log *slog) { u64 host_reads = 0, host_writes = 0; u64 data_units_read = 0, data_units_written = 0; struct nvmet_ns *ns; struct nvmet_ctrl *ctrl; unsigned long idx; ctrl = req->sq->ctrl; xa_for_each(&ctrl->subsys->namespaces, idx, ns) { /* we don't have the right data for file backed ns */ if (!ns->bdev) continue; host_reads += part_stat_read(ns->bdev, ios[READ]); data_units_read += DIV_ROUND_UP( part_stat_read(ns->bdev, sectors[READ]), 1000); host_writes += part_stat_read(ns->bdev, ios[WRITE]); data_units_written += DIV_ROUND_UP( part_stat_read(ns->bdev, sectors[WRITE]), 1000); } put_unaligned_le64(host_reads, &slog->host_reads[0]); put_unaligned_le64(data_units_read, &slog->data_units_read[0]); put_unaligned_le64(host_writes, &slog->host_writes[0]); put_unaligned_le64(data_units_written, &slog->data_units_written[0]); return NVME_SC_SUCCESS; } static void nvmet_execute_get_log_page_smart(struct nvmet_req *req) { struct nvme_smart_log *log; u16 status = NVME_SC_INTERNAL; unsigned long flags; if 
(req->transfer_len != sizeof(*log)) goto out; log = kzalloc(sizeof(*log), GFP_KERNEL); if (!log) goto out; if (req->cmd->get_log_page.nsid == cpu_to_le32(NVME_NSID_ALL)) status = nvmet_get_smart_log_all(req, log); else status = nvmet_get_smart_log_nsid(req, log); if (status) goto out_free_log; spin_lock_irqsave(&req->sq->ctrl->error_lock, flags); put_unaligned_le64(req->sq->ctrl->err_counter, &log->num_err_log_entries); spin_unlock_irqrestore(&req->sq->ctrl->error_lock, flags); status = nvmet_copy_to_sgl(req, 0, log, sizeof(*log)); out_free_log: kfree(log); out: nvmet_req_complete(req, status); } static void nvmet_get_cmd_effects_nvm(struct nvme_effects_log *log) { log->acs[nvme_admin_get_log_page] = log->acs[nvme_admin_identify] = log->acs[nvme_admin_abort_cmd] = log->acs[nvme_admin_set_features] = log->acs[nvme_admin_get_features] = log->acs[nvme_admin_async_event] = log->acs[nvme_admin_keep_alive] = cpu_to_le32(NVME_CMD_EFFECTS_CSUPP); log->iocs[nvme_cmd_read] = log->iocs[nvme_cmd_flush] = log->iocs[nvme_cmd_dsm] = cpu_to_le32(NVME_CMD_EFFECTS_CSUPP); log->iocs[nvme_cmd_write] = log->iocs[nvme_cmd_write_zeroes] = cpu_to_le32(NVME_CMD_EFFECTS_CSUPP | NVME_CMD_EFFECTS_LBCC); } static void nvmet_get_cmd_effects_zns(struct nvme_effects_log *log) { log->iocs[nvme_cmd_zone_append] = log->iocs[nvme_cmd_zone_mgmt_send] = cpu_to_le32(NVME_CMD_EFFECTS_CSUPP | NVME_CMD_EFFECTS_LBCC); log->iocs[nvme_cmd_zone_mgmt_recv] = cpu_to_le32(NVME_CMD_EFFECTS_CSUPP); } static void nvmet_execute_get_log_cmd_effects_ns(struct nvmet_req *req) { struct nvme_effects_log *log; u16 status = NVME_SC_SUCCESS; log = kzalloc(sizeof(*log), GFP_KERNEL); if (!log) { status = NVME_SC_INTERNAL; goto out; } switch (req->cmd->get_log_page.csi) { case NVME_CSI_NVM: nvmet_get_cmd_effects_nvm(log); break; case NVME_CSI_ZNS: if (!IS_ENABLED(CONFIG_BLK_DEV_ZONED)) { status = NVME_SC_INVALID_IO_CMD_SET; goto free; } nvmet_get_cmd_effects_nvm(log); nvmet_get_cmd_effects_zns(log); break; default: status = NVME_SC_INVALID_LOG_PAGE; goto free; } status = nvmet_copy_to_sgl(req, 0, log, sizeof(*log)); free: kfree(log); out: nvmet_req_complete(req, status); } static void nvmet_execute_get_log_changed_ns(struct nvmet_req *req) { struct nvmet_ctrl *ctrl = req->sq->ctrl; u16 status = NVME_SC_INTERNAL; size_t len; if (req->transfer_len != NVME_MAX_CHANGED_NAMESPACES * sizeof(__le32)) goto out; mutex_lock(&ctrl->lock); if (ctrl->nr_changed_ns == U32_MAX) len = sizeof(__le32); else len = ctrl->nr_changed_ns * sizeof(__le32); status = nvmet_copy_to_sgl(req, 0, ctrl->changed_ns_list, len); if (!status) status = nvmet_zero_sgl(req, len, req->transfer_len - len); ctrl->nr_changed_ns = 0; nvmet_clear_aen_bit(req, NVME_AEN_BIT_NS_ATTR); mutex_unlock(&ctrl->lock); out: nvmet_req_complete(req, status); } static u32 nvmet_format_ana_group(struct nvmet_req *req, u32 grpid, struct nvme_ana_group_desc *desc) { struct nvmet_ctrl *ctrl = req->sq->ctrl; struct nvmet_ns *ns; unsigned long idx; u32 count = 0; if (!(req->cmd->get_log_page.lsp & NVME_ANA_LOG_RGO)) { xa_for_each(&ctrl->subsys->namespaces, idx, ns) if (ns->anagrpid == grpid) desc->nsids[count++] = cpu_to_le32(ns->nsid); } desc->grpid = cpu_to_le32(grpid); desc->nnsids = cpu_to_le32(count); desc->chgcnt = cpu_to_le64(nvmet_ana_chgcnt); desc->state = req->port->ana_state[grpid]; memset(desc->rsvd17, 0, sizeof(desc->rsvd17)); return struct_size(desc, nsids, count); } static void nvmet_execute_get_log_page_ana(struct nvmet_req *req) { struct nvme_ana_rsp_hdr hdr = { 0, }; struct nvme_ana_group_desc 
*desc; size_t offset = sizeof(struct nvme_ana_rsp_hdr); /* start beyond hdr */ size_t len; u32 grpid; u16 ngrps = 0; u16 status; status = NVME_SC_INTERNAL; desc = kmalloc(struct_size(desc, nsids, NVMET_MAX_NAMESPACES), GFP_KERNEL); if (!desc) goto out; down_read(&nvmet_ana_sem); for (grpid = 1; grpid <= NVMET_MAX_ANAGRPS; grpid++) { if (!nvmet_ana_group_enabled[grpid]) continue; len = nvmet_format_ana_group(req, grpid, desc); status = nvmet_copy_to_sgl(req, offset, desc, len); if (status) break; offset += len; ngrps++; } for ( ; grpid <= NVMET_MAX_ANAGRPS; grpid++) { if (nvmet_ana_group_enabled[grpid]) ngrps++; } hdr.chgcnt = cpu_to_le64(nvmet_ana_chgcnt); hdr.ngrps = cpu_to_le16(ngrps); nvmet_clear_aen_bit(req, NVME_AEN_BIT_ANA_CHANGE); up_read(&nvmet_ana_sem); kfree(desc); /* copy the header last once we know the number of groups */ status = nvmet_copy_to_sgl(req, 0, &hdr, sizeof(hdr)); out: nvmet_req_complete(req, status); } static void nvmet_execute_get_log_page(struct nvmet_req *req) { if (!nvmet_check_transfer_len(req, nvmet_get_log_page_len(req->cmd))) return; switch (req->cmd->get_log_page.lid) { case NVME_LOG_ERROR: return nvmet_execute_get_log_page_error(req); case NVME_LOG_SMART: return nvmet_execute_get_log_page_smart(req); case NVME_LOG_FW_SLOT: /* * We only support a single firmware slot which always is * active, so we can zero out the whole firmware slot log and * still claim to fully implement this mandatory log page. */ return nvmet_execute_get_log_page_noop(req); case NVME_LOG_CHANGED_NS: return nvmet_execute_get_log_changed_ns(req); case NVME_LOG_CMD_EFFECTS: return nvmet_execute_get_log_cmd_effects_ns(req); case NVME_LOG_ANA: return nvmet_execute_get_log_page_ana(req); } pr_debug("unhandled lid %d on qid %d\n", req->cmd->get_log_page.lid, req->sq->qid); req->error_loc = offsetof(struct nvme_get_log_page_command, lid); nvmet_req_complete(req, NVME_SC_INVALID_FIELD | NVME_SC_DNR); } static void nvmet_execute_identify_ctrl(struct nvmet_req *req) { struct nvmet_ctrl *ctrl = req->sq->ctrl; struct nvmet_subsys *subsys = ctrl->subsys; struct nvme_id_ctrl *id; u32 cmd_capsule_size; u16 status = 0; if (!subsys->subsys_discovered) { mutex_lock(&subsys->lock); subsys->subsys_discovered = true; mutex_unlock(&subsys->lock); } id = kzalloc(sizeof(*id), GFP_KERNEL); if (!id) { status = NVME_SC_INTERNAL; goto out; } /* XXX: figure out how to assign real vendors IDs. */ id->vid = 0; id->ssvid = 0; memcpy(id->sn, ctrl->subsys->serial, NVMET_SN_MAX_SIZE); memcpy_and_pad(id->mn, sizeof(id->mn), subsys->model_number, strlen(subsys->model_number), ' '); memcpy_and_pad(id->fr, sizeof(id->fr), subsys->firmware_rev, strlen(subsys->firmware_rev), ' '); put_unaligned_le24(subsys->ieee_oui, id->ieee); id->rab = 6; if (nvmet_is_disc_subsys(ctrl->subsys)) id->cntrltype = NVME_CTRL_DISC; else id->cntrltype = NVME_CTRL_IO; /* we support multiple ports, multiples hosts and ANA: */ id->cmic = NVME_CTRL_CMIC_MULTI_PORT | NVME_CTRL_CMIC_MULTI_CTRL | NVME_CTRL_CMIC_ANA; /* Limit MDTS according to transport capability */ if (ctrl->ops->get_mdts) id->mdts = ctrl->ops->get_mdts(ctrl); else id->mdts = 0; id->cntlid = cpu_to_le16(ctrl->cntlid); id->ver = cpu_to_le32(ctrl->subsys->ver); /* XXX: figure out what to do about RTD3R/RTD3 */ id->oaes = cpu_to_le32(NVMET_AEN_CFG_OPTIONAL); id->ctratt = cpu_to_le32(NVME_CTRL_ATTR_HID_128_BIT | NVME_CTRL_ATTR_TBKAS); id->oacs = 0; /* * We don't really have a practical limit on the number of abort * comands. 
But we don't do anything useful for abort either, so * no point in allowing more abort commands than the spec requires. */ id->acl = 3; id->aerl = NVMET_ASYNC_EVENTS - 1; /* first slot is read-only, only one slot supported */ id->frmw = (1 << 0) | (1 << 1); id->lpa = (1 << 0) | (1 << 1) | (1 << 2); id->elpe = NVMET_ERROR_LOG_SLOTS - 1; id->npss = 0; /* We support keep-alive timeout in granularity of seconds */ id->kas = cpu_to_le16(NVMET_KAS); id->sqes = (0x6 << 4) | 0x6; id->cqes = (0x4 << 4) | 0x4; /* no enforcement soft-limit for maxcmd - pick arbitrary high value */ id->maxcmd = cpu_to_le16(NVMET_MAX_CMD); id->nn = cpu_to_le32(NVMET_MAX_NAMESPACES); id->mnan = cpu_to_le32(NVMET_MAX_NAMESPACES); id->oncs = cpu_to_le16(NVME_CTRL_ONCS_DSM | NVME_CTRL_ONCS_WRITE_ZEROES); /* XXX: don't report vwc if the underlying device is write through */ id->vwc = NVME_CTRL_VWC_PRESENT; /* * We can't support atomic writes bigger than a LBA without support * from the backend device. */ id->awun = 0; id->awupf = 0; id->sgls = cpu_to_le32(1 << 0); /* we always support SGLs */ if (ctrl->ops->flags & NVMF_KEYED_SGLS) id->sgls |= cpu_to_le32(1 << 2); if (req->port->inline_data_size) id->sgls |= cpu_to_le32(1 << 20); strscpy(id->subnqn, ctrl->subsys->subsysnqn, sizeof(id->subnqn)); /* * Max command capsule size is sqe + in-capsule data size. * Disable in-capsule data for Metadata capable controllers. */ cmd_capsule_size = sizeof(struct nvme_command); if (!ctrl->pi_support) cmd_capsule_size += req->port->inline_data_size; id->ioccsz = cpu_to_le32(cmd_capsule_size / 16); /* Max response capsule size is cqe */ id->iorcsz = cpu_to_le32(sizeof(struct nvme_completion) / 16); id->msdbd = ctrl->ops->msdbd; id->anacap = (1 << 0) | (1 << 1) | (1 << 2) | (1 << 3) | (1 << 4); id->anatt = 10; /* random value */ id->anagrpmax = cpu_to_le32(NVMET_MAX_ANAGRPS); id->nanagrpid = cpu_to_le32(NVMET_MAX_ANAGRPS); /* * Meh, we don't really support any power state. Fake up the same * values that qemu does. */ id->psd[0].max_power = cpu_to_le16(0x9c4); id->psd[0].entry_lat = cpu_to_le32(0x10); id->psd[0].exit_lat = cpu_to_le32(0x4); id->nwpc = 1 << 0; /* write protect and no write protect */ status = nvmet_copy_to_sgl(req, 0, id, sizeof(*id)); kfree(id); out: nvmet_req_complete(req, status); } static void nvmet_execute_identify_ns(struct nvmet_req *req) { struct nvme_id_ns *id; u16 status; if (le32_to_cpu(req->cmd->identify.nsid) == NVME_NSID_ALL) { req->error_loc = offsetof(struct nvme_identify, nsid); status = NVME_SC_INVALID_NS | NVME_SC_DNR; goto out; } id = kzalloc(sizeof(*id), GFP_KERNEL); if (!id) { status = NVME_SC_INTERNAL; goto out; } /* return an all zeroed buffer if we can't find an active namespace */ status = nvmet_req_find_ns(req); if (status) { status = 0; goto done; } if (nvmet_ns_revalidate(req->ns)) { mutex_lock(&req->ns->subsys->lock); nvmet_ns_changed(req->ns->subsys, req->ns->nsid); mutex_unlock(&req->ns->subsys->lock); } /* * nuse = ncap = nsze isn't always true, but we have no way to find * that out from the underlying device. */ id->ncap = id->nsze = cpu_to_le64(req->ns->size >> req->ns->blksize_shift); switch (req->port->ana_state[req->ns->anagrpid]) { case NVME_ANA_INACCESSIBLE: case NVME_ANA_PERSISTENT_LOSS: break; default: id->nuse = id->nsze; break; } if (req->ns->bdev) nvmet_bdev_set_limits(req->ns->bdev, id); /* * We just provide a single LBA format that matches what the * underlying device reports. */ id->nlbaf = 0; id->flbas = 0; /* * Our namespace might always be shared. 
Not just with other * controllers, but also with any other user of the block device. */ id->nmic = NVME_NS_NMIC_SHARED; id->anagrpid = cpu_to_le32(req->ns->anagrpid); memcpy(&id->nguid, &req->ns->nguid, sizeof(id->nguid)); id->lbaf[0].ds = req->ns->blksize_shift; if (req->sq->ctrl->pi_support && nvmet_ns_has_pi(req->ns)) { id->dpc = NVME_NS_DPC_PI_FIRST | NVME_NS_DPC_PI_LAST | NVME_NS_DPC_PI_TYPE1 | NVME_NS_DPC_PI_TYPE2 | NVME_NS_DPC_PI_TYPE3; id->mc = NVME_MC_EXTENDED_LBA; id->dps = req->ns->pi_type; id->flbas = NVME_NS_FLBAS_META_EXT; id->lbaf[0].ms = cpu_to_le16(req->ns->metadata_size); } if (req->ns->readonly) id->nsattr |= NVME_NS_ATTR_RO; done: if (!status) status = nvmet_copy_to_sgl(req, 0, id, sizeof(*id)); kfree(id); out: nvmet_req_complete(req, status); } static void nvmet_execute_identify_nslist(struct nvmet_req *req) { static const int buf_size = NVME_IDENTIFY_DATA_SIZE; struct nvmet_ctrl *ctrl = req->sq->ctrl; struct nvmet_ns *ns; unsigned long idx; u32 min_nsid = le32_to_cpu(req->cmd->identify.nsid); __le32 *list; u16 status = 0; int i = 0; list = kzalloc(buf_size, GFP_KERNEL); if (!list) { status = NVME_SC_INTERNAL; goto out; } xa_for_each(&ctrl->subsys->namespaces, idx, ns) { if (ns->nsid <= min_nsid) continue; list[i++] = cpu_to_le32(ns->nsid); if (i == buf_size / sizeof(__le32)) break; } status = nvmet_copy_to_sgl(req, 0, list, buf_size); kfree(list); out: nvmet_req_complete(req, status); } static u16 nvmet_copy_ns_identifier(struct nvmet_req *req, u8 type, u8 len, void *id, off_t *off) { struct nvme_ns_id_desc desc = { .nidt = type, .nidl = len, }; u16 status; status = nvmet_copy_to_sgl(req, *off, &desc, sizeof(desc)); if (status) return status; *off += sizeof(desc); status = nvmet_copy_to_sgl(req, *off, id, len); if (status) return status; *off += len; return 0; } static void nvmet_execute_identify_desclist(struct nvmet_req *req) { off_t off = 0; u16 status; status = nvmet_req_find_ns(req); if (status) goto out; if (memchr_inv(&req->ns->uuid, 0, sizeof(req->ns->uuid))) { status = nvmet_copy_ns_identifier(req, NVME_NIDT_UUID, NVME_NIDT_UUID_LEN, &req->ns->uuid, &off); if (status) goto out; } if (memchr_inv(req->ns->nguid, 0, sizeof(req->ns->nguid))) { status = nvmet_copy_ns_identifier(req, NVME_NIDT_NGUID, NVME_NIDT_NGUID_LEN, &req->ns->nguid, &off); if (status) goto out; } status = nvmet_copy_ns_identifier(req, NVME_NIDT_CSI, NVME_NIDT_CSI_LEN, &req->ns->csi, &off); if (status) goto out; if (sg_zero_buffer(req->sg, req->sg_cnt, NVME_IDENTIFY_DATA_SIZE - off, off) != NVME_IDENTIFY_DATA_SIZE - off) status = NVME_SC_INTERNAL | NVME_SC_DNR; out: nvmet_req_complete(req, status); } static void nvmet_execute_identify_ctrl_nvm(struct nvmet_req *req) { /* Not supported: return zeroes */ nvmet_req_complete(req, nvmet_zero_sgl(req, 0, sizeof(struct nvme_id_ctrl_nvm))); } static void nvmet_execute_identify(struct nvmet_req *req) { if (!nvmet_check_transfer_len(req, NVME_IDENTIFY_DATA_SIZE)) return; switch (req->cmd->identify.cns) { case NVME_ID_CNS_NS: nvmet_execute_identify_ns(req); return; case NVME_ID_CNS_CTRL: nvmet_execute_identify_ctrl(req); return; case NVME_ID_CNS_NS_ACTIVE_LIST: nvmet_execute_identify_nslist(req); return; case NVME_ID_CNS_NS_DESC_LIST: nvmet_execute_identify_desclist(req); return; case NVME_ID_CNS_CS_NS: switch (req->cmd->identify.csi) { case NVME_CSI_NVM: /* Not supported */ break; case NVME_CSI_ZNS: if (IS_ENABLED(CONFIG_BLK_DEV_ZONED)) { nvmet_execute_identify_ns_zns(req); return; } break; } break; case NVME_ID_CNS_CS_CTRL: switch 
(req->cmd->identify.csi) { case NVME_CSI_NVM: nvmet_execute_identify_ctrl_nvm(req); return; case NVME_CSI_ZNS: if (IS_ENABLED(CONFIG_BLK_DEV_ZONED)) { nvmet_execute_identify_ctrl_zns(req); return; } break; } break; } pr_debug("unhandled identify cns %d on qid %d\n", req->cmd->identify.cns, req->sq->qid); req->error_loc = offsetof(struct nvme_identify, cns); nvmet_req_complete(req, NVME_SC_INVALID_FIELD | NVME_SC_DNR); } /* * A "minimum viable" abort implementation: the command is mandatory in the * spec, but we are not required to do any useful work. We couldn't really * do a useful abort, so don't bother even with waiting for the command * to be exectuted and return immediately telling the command to abort * wasn't found. */ static void nvmet_execute_abort(struct nvmet_req *req) { if (!nvmet_check_transfer_len(req, 0)) return; nvmet_set_result(req, 1); nvmet_req_complete(req, 0); } static u16 nvmet_write_protect_flush_sync(struct nvmet_req *req) { u16 status; if (req->ns->file) status = nvmet_file_flush(req); else status = nvmet_bdev_flush(req); if (status) pr_err("write protect flush failed nsid: %u\n", req->ns->nsid); return status; } static u16 nvmet_set_feat_write_protect(struct nvmet_req *req) { u32 write_protect = le32_to_cpu(req->cmd->common.cdw11); struct nvmet_subsys *subsys = nvmet_req_subsys(req); u16 status; status = nvmet_req_find_ns(req); if (status) return status; mutex_lock(&subsys->lock); switch (write_protect) { case NVME_NS_WRITE_PROTECT: req->ns->readonly = true; status = nvmet_write_protect_flush_sync(req); if (status) req->ns->readonly = false; break; case NVME_NS_NO_WRITE_PROTECT: req->ns->readonly = false; status = 0; break; default: break; } if (!status) nvmet_ns_changed(subsys, req->ns->nsid); mutex_unlock(&subsys->lock); return status; } u16 nvmet_set_feat_kato(struct nvmet_req *req) { u32 val32 = le32_to_cpu(req->cmd->common.cdw11); nvmet_stop_keep_alive_timer(req->sq->ctrl); req->sq->ctrl->kato = DIV_ROUND_UP(val32, 1000); nvmet_start_keep_alive_timer(req->sq->ctrl); nvmet_set_result(req, req->sq->ctrl->kato); return 0; } u16 nvmet_set_feat_async_event(struct nvmet_req *req, u32 mask) { u32 val32 = le32_to_cpu(req->cmd->common.cdw11); if (val32 & ~mask) { req->error_loc = offsetof(struct nvme_common_command, cdw11); return NVME_SC_INVALID_FIELD | NVME_SC_DNR; } WRITE_ONCE(req->sq->ctrl->aen_enabled, val32); nvmet_set_result(req, val32); return 0; } void nvmet_execute_set_features(struct nvmet_req *req) { struct nvmet_subsys *subsys = nvmet_req_subsys(req); u32 cdw10 = le32_to_cpu(req->cmd->common.cdw10); u32 cdw11 = le32_to_cpu(req->cmd->common.cdw11); u16 status = 0; u16 nsqr; u16 ncqr; if (!nvmet_check_data_len_lte(req, 0)) return; switch (cdw10 & 0xff) { case NVME_FEAT_NUM_QUEUES: ncqr = (cdw11 >> 16) & 0xffff; nsqr = cdw11 & 0xffff; if (ncqr == 0xffff || nsqr == 0xffff) { status = NVME_SC_INVALID_FIELD | NVME_SC_DNR; break; } nvmet_set_result(req, (subsys->max_qid - 1) | ((subsys->max_qid - 1) << 16)); break; case NVME_FEAT_KATO: status = nvmet_set_feat_kato(req); break; case NVME_FEAT_ASYNC_EVENT: status = nvmet_set_feat_async_event(req, NVMET_AEN_CFG_ALL); break; case NVME_FEAT_HOST_ID: status = NVME_SC_CMD_SEQ_ERROR | NVME_SC_DNR; break; case NVME_FEAT_WRITE_PROTECT: status = nvmet_set_feat_write_protect(req); break; default: req->error_loc = offsetof(struct nvme_common_command, cdw10); status = NVME_SC_INVALID_FIELD | NVME_SC_DNR; break; } nvmet_req_complete(req, status); } static u16 nvmet_get_feat_write_protect(struct nvmet_req *req) { struct 
nvmet_subsys *subsys = nvmet_req_subsys(req); u32 result; result = nvmet_req_find_ns(req); if (result) return result; mutex_lock(&subsys->lock); if (req->ns->readonly == true) result = NVME_NS_WRITE_PROTECT; else result = NVME_NS_NO_WRITE_PROTECT; nvmet_set_result(req, result); mutex_unlock(&subsys->lock); return 0; } void nvmet_get_feat_kato(struct nvmet_req *req) { nvmet_set_result(req, req->sq->ctrl->kato * 1000); } void nvmet_get_feat_async_event(struct nvmet_req *req) { nvmet_set_result(req, READ_ONCE(req->sq->ctrl->aen_enabled)); } void nvmet_execute_get_features(struct nvmet_req *req) { struct nvmet_subsys *subsys = nvmet_req_subsys(req); u32 cdw10 = le32_to_cpu(req->cmd->common.cdw10); u16 status = 0; if (!nvmet_check_transfer_len(req, nvmet_feat_data_len(req, cdw10))) return; switch (cdw10 & 0xff) { /* * These features are mandatory in the spec, but we don't * have a useful way to implement them. We'll eventually * need to come up with some fake values for these. */ #if 0 case NVME_FEAT_ARBITRATION: break; case NVME_FEAT_POWER_MGMT: break; case NVME_FEAT_TEMP_THRESH: break; case NVME_FEAT_ERR_RECOVERY: break; case NVME_FEAT_IRQ_COALESCE: break; case NVME_FEAT_IRQ_CONFIG: break; case NVME_FEAT_WRITE_ATOMIC: break; #endif case NVME_FEAT_ASYNC_EVENT: nvmet_get_feat_async_event(req); break; case NVME_FEAT_VOLATILE_WC: nvmet_set_result(req, 1); break; case NVME_FEAT_NUM_QUEUES: nvmet_set_result(req, (subsys->max_qid-1) | ((subsys->max_qid-1) << 16)); break; case NVME_FEAT_KATO: nvmet_get_feat_kato(req); break; case NVME_FEAT_HOST_ID: /* need 128-bit host identifier flag */ if (!(req->cmd->common.cdw11 & cpu_to_le32(1 << 0))) { req->error_loc = offsetof(struct nvme_common_command, cdw11); status = NVME_SC_INVALID_FIELD | NVME_SC_DNR; break; } status = nvmet_copy_to_sgl(req, 0, &req->sq->ctrl->hostid, sizeof(req->sq->ctrl->hostid)); break; case NVME_FEAT_WRITE_PROTECT: status = nvmet_get_feat_write_protect(req); break; default: req->error_loc = offsetof(struct nvme_common_command, cdw10); status = NVME_SC_INVALID_FIELD | NVME_SC_DNR; break; } nvmet_req_complete(req, status); } void nvmet_execute_async_event(struct nvmet_req *req) { struct nvmet_ctrl *ctrl = req->sq->ctrl; if (!nvmet_check_transfer_len(req, 0)) return; mutex_lock(&ctrl->lock); if (ctrl->nr_async_event_cmds >= NVMET_ASYNC_EVENTS) { mutex_unlock(&ctrl->lock); nvmet_req_complete(req, NVME_SC_ASYNC_LIMIT | NVME_SC_DNR); return; } ctrl->async_event_cmds[ctrl->nr_async_event_cmds++] = req; mutex_unlock(&ctrl->lock); queue_work(nvmet_wq, &ctrl->async_event_work); } void nvmet_execute_keep_alive(struct nvmet_req *req) { struct nvmet_ctrl *ctrl = req->sq->ctrl; u16 status = 0; if (!nvmet_check_transfer_len(req, 0)) return; if (!ctrl->kato) { status = NVME_SC_KA_TIMEOUT_INVALID; goto out; } pr_debug("ctrl %d update keep-alive timer for %d secs\n", ctrl->cntlid, ctrl->kato); mod_delayed_work(system_wq, &ctrl->ka_work, ctrl->kato * HZ); out: nvmet_req_complete(req, status); } u16 nvmet_parse_admin_cmd(struct nvmet_req *req) { struct nvme_command *cmd = req->cmd; u16 ret; if (nvme_is_fabrics(cmd)) return nvmet_parse_fabrics_admin_cmd(req); if (unlikely(!nvmet_check_auth_status(req))) return NVME_SC_AUTH_REQUIRED | NVME_SC_DNR; if (nvmet_is_disc_subsys(nvmet_req_subsys(req))) return nvmet_parse_discovery_cmd(req); ret = nvmet_check_ctrl_status(req); if (unlikely(ret)) return ret; if (nvmet_is_passthru_req(req)) return nvmet_parse_passthru_admin_cmd(req); switch (cmd->common.opcode) { case nvme_admin_get_log_page: req->execute = 
nvmet_execute_get_log_page;
		return 0;
	case nvme_admin_identify:
		req->execute = nvmet_execute_identify;
		return 0;
	case nvme_admin_abort_cmd:
		req->execute = nvmet_execute_abort;
		return 0;
	case nvme_admin_set_features:
		req->execute = nvmet_execute_set_features;
		return 0;
	case nvme_admin_get_features:
		req->execute = nvmet_execute_get_features;
		return 0;
	case nvme_admin_async_event:
		req->execute = nvmet_execute_async_event;
		return 0;
	case nvme_admin_keep_alive:
		req->execute = nvmet_execute_keep_alive;
		return 0;
	default:
		return nvmet_report_invalid_opcode(req);
	}
}
linux-master
drivers/nvme/target/admin-cmd.c
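The ANA log handler in the file above writes one variable-length group descriptor per enabled ANA group at a running offset and copies the 16-byte header to offset 0 only after the loop, once the final group count is known. The stand-alone sketch below (not part of admin-cmd.c) mirrors that offset bookkeeping from the host's point of view. The two struct layouts are assumptions based on the NVMe ANA log page format (16-byte header, 32-byte descriptor plus one 32-bit NSID per namespace), not the kernel's own nvme.h definitions.

/* Illustrative only; struct layouts are assumptions, see note above. */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct ana_log_hdr {		/* assumed: mirrors struct nvme_ana_rsp_hdr */
	uint64_t chgcnt;
	uint16_t ngrps;
	uint16_t rsvd10[3];
};

struct ana_group_desc {		/* assumed: mirrors struct nvme_ana_group_desc */
	uint32_t grpid;
	uint32_t nnsids;
	uint64_t chgcnt;
	uint8_t  state;
	uint8_t  rsvd17[15];
	/* followed by nnsids 32-bit namespace IDs */
};

/*
 * Mirror of the target's bookkeeping: the offset starts just beyond the
 * header and advances by one descriptor plus its NSID list per group.
 */
static size_t ana_log_len(const uint32_t *nsids_per_grp, unsigned int ngrps)
{
	size_t off = sizeof(struct ana_log_hdr);
	unsigned int i;

	for (i = 0; i < ngrps; i++)
		off += sizeof(struct ana_group_desc) +
		       nsids_per_grp[i] * sizeof(uint32_t);
	return off;
}

int main(void)
{
	/* hypothetical example: three enabled groups with 4, 0 and 1 namespaces */
	uint32_t nsids_per_grp[] = { 4, 0, 1 };

	printf("ANA log page length: %zu bytes\n",
	       ana_log_len(nsids_per_grp, 3));
	return 0;
}

In the target code the same accumulation happens through struct_size() and nvmet_copy_to_sgl(), with trailing groups that hold no namespaces still counted into ngrps before the header is written.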
// SPDX-License-Identifier: GPL-2.0 /* * Configfs interface for the NVMe target. * Copyright (c) 2015-2016 HGST, a Western Digital Company. */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/kstrtox.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/slab.h> #include <linux/stat.h> #include <linux/ctype.h> #include <linux/pci.h> #include <linux/pci-p2pdma.h> #ifdef CONFIG_NVME_TARGET_AUTH #include <linux/nvme-auth.h> #endif #include <crypto/hash.h> #include <crypto/kpp.h> #include "nvmet.h" static const struct config_item_type nvmet_host_type; static const struct config_item_type nvmet_subsys_type; static LIST_HEAD(nvmet_ports_list); struct list_head *nvmet_ports = &nvmet_ports_list; struct nvmet_type_name_map { u8 type; const char *name; }; static struct nvmet_type_name_map nvmet_transport[] = { { NVMF_TRTYPE_RDMA, "rdma" }, { NVMF_TRTYPE_FC, "fc" }, { NVMF_TRTYPE_TCP, "tcp" }, { NVMF_TRTYPE_LOOP, "loop" }, }; static const struct nvmet_type_name_map nvmet_addr_family[] = { { NVMF_ADDR_FAMILY_PCI, "pcie" }, { NVMF_ADDR_FAMILY_IP4, "ipv4" }, { NVMF_ADDR_FAMILY_IP6, "ipv6" }, { NVMF_ADDR_FAMILY_IB, "ib" }, { NVMF_ADDR_FAMILY_FC, "fc" }, { NVMF_ADDR_FAMILY_LOOP, "loop" }, }; static bool nvmet_is_port_enabled(struct nvmet_port *p, const char *caller) { if (p->enabled) pr_err("Disable port '%u' before changing attribute in %s\n", le16_to_cpu(p->disc_addr.portid), caller); return p->enabled; } /* * nvmet_port Generic ConfigFS definitions. * Used in any place in the ConfigFS tree that refers to an address. */ static ssize_t nvmet_addr_adrfam_show(struct config_item *item, char *page) { u8 adrfam = to_nvmet_port(item)->disc_addr.adrfam; int i; for (i = 1; i < ARRAY_SIZE(nvmet_addr_family); i++) { if (nvmet_addr_family[i].type == adrfam) return snprintf(page, PAGE_SIZE, "%s\n", nvmet_addr_family[i].name); } return snprintf(page, PAGE_SIZE, "\n"); } static ssize_t nvmet_addr_adrfam_store(struct config_item *item, const char *page, size_t count) { struct nvmet_port *port = to_nvmet_port(item); int i; if (nvmet_is_port_enabled(port, __func__)) return -EACCES; for (i = 1; i < ARRAY_SIZE(nvmet_addr_family); i++) { if (sysfs_streq(page, nvmet_addr_family[i].name)) goto found; } pr_err("Invalid value '%s' for adrfam\n", page); return -EINVAL; found: port->disc_addr.adrfam = nvmet_addr_family[i].type; return count; } CONFIGFS_ATTR(nvmet_, addr_adrfam); static ssize_t nvmet_addr_portid_show(struct config_item *item, char *page) { __le16 portid = to_nvmet_port(item)->disc_addr.portid; return snprintf(page, PAGE_SIZE, "%d\n", le16_to_cpu(portid)); } static ssize_t nvmet_addr_portid_store(struct config_item *item, const char *page, size_t count) { struct nvmet_port *port = to_nvmet_port(item); u16 portid = 0; if (kstrtou16(page, 0, &portid)) { pr_err("Invalid value '%s' for portid\n", page); return -EINVAL; } if (nvmet_is_port_enabled(port, __func__)) return -EACCES; port->disc_addr.portid = cpu_to_le16(portid); return count; } CONFIGFS_ATTR(nvmet_, addr_portid); static ssize_t nvmet_addr_traddr_show(struct config_item *item, char *page) { struct nvmet_port *port = to_nvmet_port(item); return snprintf(page, PAGE_SIZE, "%s\n", port->disc_addr.traddr); } static ssize_t nvmet_addr_traddr_store(struct config_item *item, const char *page, size_t count) { struct nvmet_port *port = to_nvmet_port(item); if (count > NVMF_TRADDR_SIZE) { pr_err("Invalid value '%s' for traddr\n", page); return -EINVAL; } if (nvmet_is_port_enabled(port, __func__)) return -EACCES; if (sscanf(page, "%s\n", 
port->disc_addr.traddr) != 1) return -EINVAL; return count; } CONFIGFS_ATTR(nvmet_, addr_traddr); static const struct nvmet_type_name_map nvmet_addr_treq[] = { { NVMF_TREQ_NOT_SPECIFIED, "not specified" }, { NVMF_TREQ_REQUIRED, "required" }, { NVMF_TREQ_NOT_REQUIRED, "not required" }, }; static ssize_t nvmet_addr_treq_show(struct config_item *item, char *page) { u8 treq = to_nvmet_port(item)->disc_addr.treq & NVME_TREQ_SECURE_CHANNEL_MASK; int i; for (i = 0; i < ARRAY_SIZE(nvmet_addr_treq); i++) { if (treq == nvmet_addr_treq[i].type) return snprintf(page, PAGE_SIZE, "%s\n", nvmet_addr_treq[i].name); } return snprintf(page, PAGE_SIZE, "\n"); } static ssize_t nvmet_addr_treq_store(struct config_item *item, const char *page, size_t count) { struct nvmet_port *port = to_nvmet_port(item); u8 treq = port->disc_addr.treq & ~NVME_TREQ_SECURE_CHANNEL_MASK; int i; if (nvmet_is_port_enabled(port, __func__)) return -EACCES; for (i = 0; i < ARRAY_SIZE(nvmet_addr_treq); i++) { if (sysfs_streq(page, nvmet_addr_treq[i].name)) goto found; } pr_err("Invalid value '%s' for treq\n", page); return -EINVAL; found: treq |= nvmet_addr_treq[i].type; port->disc_addr.treq = treq; return count; } CONFIGFS_ATTR(nvmet_, addr_treq); static ssize_t nvmet_addr_trsvcid_show(struct config_item *item, char *page) { struct nvmet_port *port = to_nvmet_port(item); return snprintf(page, PAGE_SIZE, "%s\n", port->disc_addr.trsvcid); } static ssize_t nvmet_addr_trsvcid_store(struct config_item *item, const char *page, size_t count) { struct nvmet_port *port = to_nvmet_port(item); if (count > NVMF_TRSVCID_SIZE) { pr_err("Invalid value '%s' for trsvcid\n", page); return -EINVAL; } if (nvmet_is_port_enabled(port, __func__)) return -EACCES; if (sscanf(page, "%s\n", port->disc_addr.trsvcid) != 1) return -EINVAL; return count; } CONFIGFS_ATTR(nvmet_, addr_trsvcid); static ssize_t nvmet_param_inline_data_size_show(struct config_item *item, char *page) { struct nvmet_port *port = to_nvmet_port(item); return snprintf(page, PAGE_SIZE, "%d\n", port->inline_data_size); } static ssize_t nvmet_param_inline_data_size_store(struct config_item *item, const char *page, size_t count) { struct nvmet_port *port = to_nvmet_port(item); int ret; if (nvmet_is_port_enabled(port, __func__)) return -EACCES; ret = kstrtoint(page, 0, &port->inline_data_size); if (ret) { pr_err("Invalid value '%s' for inline_data_size\n", page); return -EINVAL; } return count; } CONFIGFS_ATTR(nvmet_, param_inline_data_size); #ifdef CONFIG_BLK_DEV_INTEGRITY static ssize_t nvmet_param_pi_enable_show(struct config_item *item, char *page) { struct nvmet_port *port = to_nvmet_port(item); return snprintf(page, PAGE_SIZE, "%d\n", port->pi_enable); } static ssize_t nvmet_param_pi_enable_store(struct config_item *item, const char *page, size_t count) { struct nvmet_port *port = to_nvmet_port(item); bool val; if (kstrtobool(page, &val)) return -EINVAL; if (nvmet_is_port_enabled(port, __func__)) return -EACCES; port->pi_enable = val; return count; } CONFIGFS_ATTR(nvmet_, param_pi_enable); #endif static ssize_t nvmet_addr_trtype_show(struct config_item *item, char *page) { struct nvmet_port *port = to_nvmet_port(item); int i; for (i = 0; i < ARRAY_SIZE(nvmet_transport); i++) { if (port->disc_addr.trtype == nvmet_transport[i].type) return snprintf(page, PAGE_SIZE, "%s\n", nvmet_transport[i].name); } return sprintf(page, "\n"); } static void nvmet_port_init_tsas_rdma(struct nvmet_port *port) { port->disc_addr.tsas.rdma.qptype = NVMF_RDMA_QPTYPE_CONNECTED; port->disc_addr.tsas.rdma.prtype = 
NVMF_RDMA_PRTYPE_NOT_SPECIFIED; port->disc_addr.tsas.rdma.cms = NVMF_RDMA_CMS_RDMA_CM; } static ssize_t nvmet_addr_trtype_store(struct config_item *item, const char *page, size_t count) { struct nvmet_port *port = to_nvmet_port(item); int i; if (nvmet_is_port_enabled(port, __func__)) return -EACCES; for (i = 0; i < ARRAY_SIZE(nvmet_transport); i++) { if (sysfs_streq(page, nvmet_transport[i].name)) goto found; } pr_err("Invalid value '%s' for trtype\n", page); return -EINVAL; found: memset(&port->disc_addr.tsas, 0, NVMF_TSAS_SIZE); port->disc_addr.trtype = nvmet_transport[i].type; if (port->disc_addr.trtype == NVMF_TRTYPE_RDMA) nvmet_port_init_tsas_rdma(port); return count; } CONFIGFS_ATTR(nvmet_, addr_trtype); /* * Namespace structures & file operation functions below */ static ssize_t nvmet_ns_device_path_show(struct config_item *item, char *page) { return sprintf(page, "%s\n", to_nvmet_ns(item)->device_path); } static ssize_t nvmet_ns_device_path_store(struct config_item *item, const char *page, size_t count) { struct nvmet_ns *ns = to_nvmet_ns(item); struct nvmet_subsys *subsys = ns->subsys; size_t len; int ret; mutex_lock(&subsys->lock); ret = -EBUSY; if (ns->enabled) goto out_unlock; ret = -EINVAL; len = strcspn(page, "\n"); if (!len) goto out_unlock; kfree(ns->device_path); ret = -ENOMEM; ns->device_path = kmemdup_nul(page, len, GFP_KERNEL); if (!ns->device_path) goto out_unlock; mutex_unlock(&subsys->lock); return count; out_unlock: mutex_unlock(&subsys->lock); return ret; } CONFIGFS_ATTR(nvmet_ns_, device_path); #ifdef CONFIG_PCI_P2PDMA static ssize_t nvmet_ns_p2pmem_show(struct config_item *item, char *page) { struct nvmet_ns *ns = to_nvmet_ns(item); return pci_p2pdma_enable_show(page, ns->p2p_dev, ns->use_p2pmem); } static ssize_t nvmet_ns_p2pmem_store(struct config_item *item, const char *page, size_t count) { struct nvmet_ns *ns = to_nvmet_ns(item); struct pci_dev *p2p_dev = NULL; bool use_p2pmem; int ret = count; int error; mutex_lock(&ns->subsys->lock); if (ns->enabled) { ret = -EBUSY; goto out_unlock; } error = pci_p2pdma_enable_store(page, &p2p_dev, &use_p2pmem); if (error) { ret = error; goto out_unlock; } ns->use_p2pmem = use_p2pmem; pci_dev_put(ns->p2p_dev); ns->p2p_dev = p2p_dev; out_unlock: mutex_unlock(&ns->subsys->lock); return ret; } CONFIGFS_ATTR(nvmet_ns_, p2pmem); #endif /* CONFIG_PCI_P2PDMA */ static ssize_t nvmet_ns_device_uuid_show(struct config_item *item, char *page) { return sprintf(page, "%pUb\n", &to_nvmet_ns(item)->uuid); } static ssize_t nvmet_ns_device_uuid_store(struct config_item *item, const char *page, size_t count) { struct nvmet_ns *ns = to_nvmet_ns(item); struct nvmet_subsys *subsys = ns->subsys; int ret = 0; mutex_lock(&subsys->lock); if (ns->enabled) { ret = -EBUSY; goto out_unlock; } if (uuid_parse(page, &ns->uuid)) ret = -EINVAL; out_unlock: mutex_unlock(&subsys->lock); return ret ? 
ret : count; } CONFIGFS_ATTR(nvmet_ns_, device_uuid); static ssize_t nvmet_ns_device_nguid_show(struct config_item *item, char *page) { return sprintf(page, "%pUb\n", &to_nvmet_ns(item)->nguid); } static ssize_t nvmet_ns_device_nguid_store(struct config_item *item, const char *page, size_t count) { struct nvmet_ns *ns = to_nvmet_ns(item); struct nvmet_subsys *subsys = ns->subsys; u8 nguid[16]; const char *p = page; int i; int ret = 0; mutex_lock(&subsys->lock); if (ns->enabled) { ret = -EBUSY; goto out_unlock; } for (i = 0; i < 16; i++) { if (p + 2 > page + count) { ret = -EINVAL; goto out_unlock; } if (!isxdigit(p[0]) || !isxdigit(p[1])) { ret = -EINVAL; goto out_unlock; } nguid[i] = (hex_to_bin(p[0]) << 4) | hex_to_bin(p[1]); p += 2; if (*p == '-' || *p == ':') p++; } memcpy(&ns->nguid, nguid, sizeof(nguid)); out_unlock: mutex_unlock(&subsys->lock); return ret ? ret : count; } CONFIGFS_ATTR(nvmet_ns_, device_nguid); static ssize_t nvmet_ns_ana_grpid_show(struct config_item *item, char *page) { return sprintf(page, "%u\n", to_nvmet_ns(item)->anagrpid); } static ssize_t nvmet_ns_ana_grpid_store(struct config_item *item, const char *page, size_t count) { struct nvmet_ns *ns = to_nvmet_ns(item); u32 oldgrpid, newgrpid; int ret; ret = kstrtou32(page, 0, &newgrpid); if (ret) return ret; if (newgrpid < 1 || newgrpid > NVMET_MAX_ANAGRPS) return -EINVAL; down_write(&nvmet_ana_sem); oldgrpid = ns->anagrpid; nvmet_ana_group_enabled[newgrpid]++; ns->anagrpid = newgrpid; nvmet_ana_group_enabled[oldgrpid]--; nvmet_ana_chgcnt++; up_write(&nvmet_ana_sem); nvmet_send_ana_event(ns->subsys, NULL); return count; } CONFIGFS_ATTR(nvmet_ns_, ana_grpid); static ssize_t nvmet_ns_enable_show(struct config_item *item, char *page) { return sprintf(page, "%d\n", to_nvmet_ns(item)->enabled); } static ssize_t nvmet_ns_enable_store(struct config_item *item, const char *page, size_t count) { struct nvmet_ns *ns = to_nvmet_ns(item); bool enable; int ret = 0; if (kstrtobool(page, &enable)) return -EINVAL; if (enable) ret = nvmet_ns_enable(ns); else nvmet_ns_disable(ns); return ret ? 
ret : count; } CONFIGFS_ATTR(nvmet_ns_, enable); static ssize_t nvmet_ns_buffered_io_show(struct config_item *item, char *page) { return sprintf(page, "%d\n", to_nvmet_ns(item)->buffered_io); } static ssize_t nvmet_ns_buffered_io_store(struct config_item *item, const char *page, size_t count) { struct nvmet_ns *ns = to_nvmet_ns(item); bool val; if (kstrtobool(page, &val)) return -EINVAL; mutex_lock(&ns->subsys->lock); if (ns->enabled) { pr_err("disable ns before setting buffered_io value.\n"); mutex_unlock(&ns->subsys->lock); return -EINVAL; } ns->buffered_io = val; mutex_unlock(&ns->subsys->lock); return count; } CONFIGFS_ATTR(nvmet_ns_, buffered_io); static ssize_t nvmet_ns_revalidate_size_store(struct config_item *item, const char *page, size_t count) { struct nvmet_ns *ns = to_nvmet_ns(item); bool val; if (kstrtobool(page, &val)) return -EINVAL; if (!val) return -EINVAL; mutex_lock(&ns->subsys->lock); if (!ns->enabled) { pr_err("enable ns before revalidate.\n"); mutex_unlock(&ns->subsys->lock); return -EINVAL; } if (nvmet_ns_revalidate(ns)) nvmet_ns_changed(ns->subsys, ns->nsid); mutex_unlock(&ns->subsys->lock); return count; } CONFIGFS_ATTR_WO(nvmet_ns_, revalidate_size); static struct configfs_attribute *nvmet_ns_attrs[] = { &nvmet_ns_attr_device_path, &nvmet_ns_attr_device_nguid, &nvmet_ns_attr_device_uuid, &nvmet_ns_attr_ana_grpid, &nvmet_ns_attr_enable, &nvmet_ns_attr_buffered_io, &nvmet_ns_attr_revalidate_size, #ifdef CONFIG_PCI_P2PDMA &nvmet_ns_attr_p2pmem, #endif NULL, }; static void nvmet_ns_release(struct config_item *item) { struct nvmet_ns *ns = to_nvmet_ns(item); nvmet_ns_free(ns); } static struct configfs_item_operations nvmet_ns_item_ops = { .release = nvmet_ns_release, }; static const struct config_item_type nvmet_ns_type = { .ct_item_ops = &nvmet_ns_item_ops, .ct_attrs = nvmet_ns_attrs, .ct_owner = THIS_MODULE, }; static struct config_group *nvmet_ns_make(struct config_group *group, const char *name) { struct nvmet_subsys *subsys = namespaces_to_subsys(&group->cg_item); struct nvmet_ns *ns; int ret; u32 nsid; ret = kstrtou32(name, 0, &nsid); if (ret) goto out; ret = -EINVAL; if (nsid == 0 || nsid == NVME_NSID_ALL) { pr_err("invalid nsid %#x", nsid); goto out; } ret = -ENOMEM; ns = nvmet_ns_alloc(subsys, nsid); if (!ns) goto out; config_group_init_type_name(&ns->group, name, &nvmet_ns_type); pr_info("adding nsid %d to subsystem %s\n", nsid, subsys->subsysnqn); return &ns->group; out: return ERR_PTR(ret); } static struct configfs_group_operations nvmet_namespaces_group_ops = { .make_group = nvmet_ns_make, }; static const struct config_item_type nvmet_namespaces_type = { .ct_group_ops = &nvmet_namespaces_group_ops, .ct_owner = THIS_MODULE, }; #ifdef CONFIG_NVME_TARGET_PASSTHRU static ssize_t nvmet_passthru_device_path_show(struct config_item *item, char *page) { struct nvmet_subsys *subsys = to_subsys(item->ci_parent); return snprintf(page, PAGE_SIZE, "%s\n", subsys->passthru_ctrl_path); } static ssize_t nvmet_passthru_device_path_store(struct config_item *item, const char *page, size_t count) { struct nvmet_subsys *subsys = to_subsys(item->ci_parent); size_t len; int ret; mutex_lock(&subsys->lock); ret = -EBUSY; if (subsys->passthru_ctrl) goto out_unlock; ret = -EINVAL; len = strcspn(page, "\n"); if (!len) goto out_unlock; kfree(subsys->passthru_ctrl_path); ret = -ENOMEM; subsys->passthru_ctrl_path = kstrndup(page, len, GFP_KERNEL); if (!subsys->passthru_ctrl_path) goto out_unlock; mutex_unlock(&subsys->lock); return count; out_unlock: mutex_unlock(&subsys->lock); return 
ret; } CONFIGFS_ATTR(nvmet_passthru_, device_path); static ssize_t nvmet_passthru_enable_show(struct config_item *item, char *page) { struct nvmet_subsys *subsys = to_subsys(item->ci_parent); return sprintf(page, "%d\n", subsys->passthru_ctrl ? 1 : 0); } static ssize_t nvmet_passthru_enable_store(struct config_item *item, const char *page, size_t count) { struct nvmet_subsys *subsys = to_subsys(item->ci_parent); bool enable; int ret = 0; if (kstrtobool(page, &enable)) return -EINVAL; if (enable) ret = nvmet_passthru_ctrl_enable(subsys); else nvmet_passthru_ctrl_disable(subsys); return ret ? ret : count; } CONFIGFS_ATTR(nvmet_passthru_, enable); static ssize_t nvmet_passthru_admin_timeout_show(struct config_item *item, char *page) { return sprintf(page, "%u\n", to_subsys(item->ci_parent)->admin_timeout); } static ssize_t nvmet_passthru_admin_timeout_store(struct config_item *item, const char *page, size_t count) { struct nvmet_subsys *subsys = to_subsys(item->ci_parent); unsigned int timeout; if (kstrtouint(page, 0, &timeout)) return -EINVAL; subsys->admin_timeout = timeout; return count; } CONFIGFS_ATTR(nvmet_passthru_, admin_timeout); static ssize_t nvmet_passthru_io_timeout_show(struct config_item *item, char *page) { return sprintf(page, "%u\n", to_subsys(item->ci_parent)->io_timeout); } static ssize_t nvmet_passthru_io_timeout_store(struct config_item *item, const char *page, size_t count) { struct nvmet_subsys *subsys = to_subsys(item->ci_parent); unsigned int timeout; if (kstrtouint(page, 0, &timeout)) return -EINVAL; subsys->io_timeout = timeout; return count; } CONFIGFS_ATTR(nvmet_passthru_, io_timeout); static ssize_t nvmet_passthru_clear_ids_show(struct config_item *item, char *page) { return sprintf(page, "%u\n", to_subsys(item->ci_parent)->clear_ids); } static ssize_t nvmet_passthru_clear_ids_store(struct config_item *item, const char *page, size_t count) { struct nvmet_subsys *subsys = to_subsys(item->ci_parent); unsigned int clear_ids; if (kstrtouint(page, 0, &clear_ids)) return -EINVAL; subsys->clear_ids = clear_ids; return count; } CONFIGFS_ATTR(nvmet_passthru_, clear_ids); static struct configfs_attribute *nvmet_passthru_attrs[] = { &nvmet_passthru_attr_device_path, &nvmet_passthru_attr_enable, &nvmet_passthru_attr_admin_timeout, &nvmet_passthru_attr_io_timeout, &nvmet_passthru_attr_clear_ids, NULL, }; static const struct config_item_type nvmet_passthru_type = { .ct_attrs = nvmet_passthru_attrs, .ct_owner = THIS_MODULE, }; static void nvmet_add_passthru_group(struct nvmet_subsys *subsys) { config_group_init_type_name(&subsys->passthru_group, "passthru", &nvmet_passthru_type); configfs_add_default_group(&subsys->passthru_group, &subsys->group); } #else /* CONFIG_NVME_TARGET_PASSTHRU */ static void nvmet_add_passthru_group(struct nvmet_subsys *subsys) { } #endif /* CONFIG_NVME_TARGET_PASSTHRU */ static int nvmet_port_subsys_allow_link(struct config_item *parent, struct config_item *target) { struct nvmet_port *port = to_nvmet_port(parent->ci_parent); struct nvmet_subsys *subsys; struct nvmet_subsys_link *link, *p; int ret; if (target->ci_type != &nvmet_subsys_type) { pr_err("can only link subsystems into the subsystems dir.!\n"); return -EINVAL; } subsys = to_subsys(target); link = kmalloc(sizeof(*link), GFP_KERNEL); if (!link) return -ENOMEM; link->subsys = subsys; down_write(&nvmet_config_sem); ret = -EEXIST; list_for_each_entry(p, &port->subsystems, entry) { if (p->subsys == subsys) goto out_free_link; } if (list_empty(&port->subsystems)) { ret = nvmet_enable_port(port); 
if (ret) goto out_free_link; } list_add_tail(&link->entry, &port->subsystems); nvmet_port_disc_changed(port, subsys); up_write(&nvmet_config_sem); return 0; out_free_link: up_write(&nvmet_config_sem); kfree(link); return ret; } static void nvmet_port_subsys_drop_link(struct config_item *parent, struct config_item *target) { struct nvmet_port *port = to_nvmet_port(parent->ci_parent); struct nvmet_subsys *subsys = to_subsys(target); struct nvmet_subsys_link *p; down_write(&nvmet_config_sem); list_for_each_entry(p, &port->subsystems, entry) { if (p->subsys == subsys) goto found; } up_write(&nvmet_config_sem); return; found: list_del(&p->entry); nvmet_port_del_ctrls(port, subsys); nvmet_port_disc_changed(port, subsys); if (list_empty(&port->subsystems)) nvmet_disable_port(port); up_write(&nvmet_config_sem); kfree(p); } static struct configfs_item_operations nvmet_port_subsys_item_ops = { .allow_link = nvmet_port_subsys_allow_link, .drop_link = nvmet_port_subsys_drop_link, }; static const struct config_item_type nvmet_port_subsys_type = { .ct_item_ops = &nvmet_port_subsys_item_ops, .ct_owner = THIS_MODULE, }; static int nvmet_allowed_hosts_allow_link(struct config_item *parent, struct config_item *target) { struct nvmet_subsys *subsys = to_subsys(parent->ci_parent); struct nvmet_host *host; struct nvmet_host_link *link, *p; int ret; if (target->ci_type != &nvmet_host_type) { pr_err("can only link hosts into the allowed_hosts directory!\n"); return -EINVAL; } host = to_host(target); link = kmalloc(sizeof(*link), GFP_KERNEL); if (!link) return -ENOMEM; link->host = host; down_write(&nvmet_config_sem); ret = -EINVAL; if (subsys->allow_any_host) { pr_err("can't add hosts when allow_any_host is set!\n"); goto out_free_link; } ret = -EEXIST; list_for_each_entry(p, &subsys->hosts, entry) { if (!strcmp(nvmet_host_name(p->host), nvmet_host_name(host))) goto out_free_link; } list_add_tail(&link->entry, &subsys->hosts); nvmet_subsys_disc_changed(subsys, host); up_write(&nvmet_config_sem); return 0; out_free_link: up_write(&nvmet_config_sem); kfree(link); return ret; } static void nvmet_allowed_hosts_drop_link(struct config_item *parent, struct config_item *target) { struct nvmet_subsys *subsys = to_subsys(parent->ci_parent); struct nvmet_host *host = to_host(target); struct nvmet_host_link *p; down_write(&nvmet_config_sem); list_for_each_entry(p, &subsys->hosts, entry) { if (!strcmp(nvmet_host_name(p->host), nvmet_host_name(host))) goto found; } up_write(&nvmet_config_sem); return; found: list_del(&p->entry); nvmet_subsys_disc_changed(subsys, host); up_write(&nvmet_config_sem); kfree(p); } static struct configfs_item_operations nvmet_allowed_hosts_item_ops = { .allow_link = nvmet_allowed_hosts_allow_link, .drop_link = nvmet_allowed_hosts_drop_link, }; static const struct config_item_type nvmet_allowed_hosts_type = { .ct_item_ops = &nvmet_allowed_hosts_item_ops, .ct_owner = THIS_MODULE, }; static ssize_t nvmet_subsys_attr_allow_any_host_show(struct config_item *item, char *page) { return snprintf(page, PAGE_SIZE, "%d\n", to_subsys(item)->allow_any_host); } static ssize_t nvmet_subsys_attr_allow_any_host_store(struct config_item *item, const char *page, size_t count) { struct nvmet_subsys *subsys = to_subsys(item); bool allow_any_host; int ret = 0; if (kstrtobool(page, &allow_any_host)) return -EINVAL; down_write(&nvmet_config_sem); if (allow_any_host && !list_empty(&subsys->hosts)) { pr_err("Can't set allow_any_host when explicit hosts are set!\n"); ret = -EINVAL; goto out_unlock; } if 
(subsys->allow_any_host != allow_any_host) { subsys->allow_any_host = allow_any_host; nvmet_subsys_disc_changed(subsys, NULL); } out_unlock: up_write(&nvmet_config_sem); return ret ? ret : count; } CONFIGFS_ATTR(nvmet_subsys_, attr_allow_any_host); static ssize_t nvmet_subsys_attr_version_show(struct config_item *item, char *page) { struct nvmet_subsys *subsys = to_subsys(item); if (NVME_TERTIARY(subsys->ver)) return snprintf(page, PAGE_SIZE, "%llu.%llu.%llu\n", NVME_MAJOR(subsys->ver), NVME_MINOR(subsys->ver), NVME_TERTIARY(subsys->ver)); return snprintf(page, PAGE_SIZE, "%llu.%llu\n", NVME_MAJOR(subsys->ver), NVME_MINOR(subsys->ver)); } static ssize_t nvmet_subsys_attr_version_store_locked(struct nvmet_subsys *subsys, const char *page, size_t count) { int major, minor, tertiary = 0; int ret; if (subsys->subsys_discovered) { if (NVME_TERTIARY(subsys->ver)) pr_err("Can't set version number. %llu.%llu.%llu is already assigned\n", NVME_MAJOR(subsys->ver), NVME_MINOR(subsys->ver), NVME_TERTIARY(subsys->ver)); else pr_err("Can't set version number. %llu.%llu is already assigned\n", NVME_MAJOR(subsys->ver), NVME_MINOR(subsys->ver)); return -EINVAL; } /* passthru subsystems use the underlying controller's version */ if (nvmet_is_passthru_subsys(subsys)) return -EINVAL; ret = sscanf(page, "%d.%d.%d\n", &major, &minor, &tertiary); if (ret != 2 && ret != 3) return -EINVAL; subsys->ver = NVME_VS(major, minor, tertiary); return count; } static ssize_t nvmet_subsys_attr_version_store(struct config_item *item, const char *page, size_t count) { struct nvmet_subsys *subsys = to_subsys(item); ssize_t ret; down_write(&nvmet_config_sem); mutex_lock(&subsys->lock); ret = nvmet_subsys_attr_version_store_locked(subsys, page, count); mutex_unlock(&subsys->lock); up_write(&nvmet_config_sem); return ret; } CONFIGFS_ATTR(nvmet_subsys_, attr_version); /* See Section 1.5 of NVMe 1.4 */ static bool nvmet_is_ascii(const char c) { return c >= 0x20 && c <= 0x7e; } static ssize_t nvmet_subsys_attr_serial_show(struct config_item *item, char *page) { struct nvmet_subsys *subsys = to_subsys(item); return snprintf(page, PAGE_SIZE, "%.*s\n", NVMET_SN_MAX_SIZE, subsys->serial); } static ssize_t nvmet_subsys_attr_serial_store_locked(struct nvmet_subsys *subsys, const char *page, size_t count) { int pos, len = strcspn(page, "\n"); if (subsys->subsys_discovered) { pr_err("Can't set serial number. 
%s is already assigned\n", subsys->serial); return -EINVAL; } if (!len || len > NVMET_SN_MAX_SIZE) { pr_err("Serial Number can not be empty or exceed %d Bytes\n", NVMET_SN_MAX_SIZE); return -EINVAL; } for (pos = 0; pos < len; pos++) { if (!nvmet_is_ascii(page[pos])) { pr_err("Serial Number must contain only ASCII strings\n"); return -EINVAL; } } memcpy_and_pad(subsys->serial, NVMET_SN_MAX_SIZE, page, len, ' '); return count; } static ssize_t nvmet_subsys_attr_serial_store(struct config_item *item, const char *page, size_t count) { struct nvmet_subsys *subsys = to_subsys(item); ssize_t ret; down_write(&nvmet_config_sem); mutex_lock(&subsys->lock); ret = nvmet_subsys_attr_serial_store_locked(subsys, page, count); mutex_unlock(&subsys->lock); up_write(&nvmet_config_sem); return ret; } CONFIGFS_ATTR(nvmet_subsys_, attr_serial); static ssize_t nvmet_subsys_attr_cntlid_min_show(struct config_item *item, char *page) { return snprintf(page, PAGE_SIZE, "%u\n", to_subsys(item)->cntlid_min); } static ssize_t nvmet_subsys_attr_cntlid_min_store(struct config_item *item, const char *page, size_t cnt) { u16 cntlid_min; if (sscanf(page, "%hu\n", &cntlid_min) != 1) return -EINVAL; if (cntlid_min == 0) return -EINVAL; down_write(&nvmet_config_sem); if (cntlid_min >= to_subsys(item)->cntlid_max) goto out_unlock; to_subsys(item)->cntlid_min = cntlid_min; up_write(&nvmet_config_sem); return cnt; out_unlock: up_write(&nvmet_config_sem); return -EINVAL; } CONFIGFS_ATTR(nvmet_subsys_, attr_cntlid_min); static ssize_t nvmet_subsys_attr_cntlid_max_show(struct config_item *item, char *page) { return snprintf(page, PAGE_SIZE, "%u\n", to_subsys(item)->cntlid_max); } static ssize_t nvmet_subsys_attr_cntlid_max_store(struct config_item *item, const char *page, size_t cnt) { u16 cntlid_max; if (sscanf(page, "%hu\n", &cntlid_max) != 1) return -EINVAL; if (cntlid_max == 0) return -EINVAL; down_write(&nvmet_config_sem); if (cntlid_max <= to_subsys(item)->cntlid_min) goto out_unlock; to_subsys(item)->cntlid_max = cntlid_max; up_write(&nvmet_config_sem); return cnt; out_unlock: up_write(&nvmet_config_sem); return -EINVAL; } CONFIGFS_ATTR(nvmet_subsys_, attr_cntlid_max); static ssize_t nvmet_subsys_attr_model_show(struct config_item *item, char *page) { struct nvmet_subsys *subsys = to_subsys(item); return snprintf(page, PAGE_SIZE, "%s\n", subsys->model_number); } static ssize_t nvmet_subsys_attr_model_store_locked(struct nvmet_subsys *subsys, const char *page, size_t count) { int pos = 0, len; char *val; if (subsys->subsys_discovered) { pr_err("Can't set model number. 
%s is already assigned\n", subsys->model_number); return -EINVAL; } len = strcspn(page, "\n"); if (!len) return -EINVAL; if (len > NVMET_MN_MAX_SIZE) { pr_err("Model number size can not exceed %d Bytes\n", NVMET_MN_MAX_SIZE); return -EINVAL; } for (pos = 0; pos < len; pos++) { if (!nvmet_is_ascii(page[pos])) return -EINVAL; } val = kmemdup_nul(page, len, GFP_KERNEL); if (!val) return -ENOMEM; kfree(subsys->model_number); subsys->model_number = val; return count; } static ssize_t nvmet_subsys_attr_model_store(struct config_item *item, const char *page, size_t count) { struct nvmet_subsys *subsys = to_subsys(item); ssize_t ret; down_write(&nvmet_config_sem); mutex_lock(&subsys->lock); ret = nvmet_subsys_attr_model_store_locked(subsys, page, count); mutex_unlock(&subsys->lock); up_write(&nvmet_config_sem); return ret; } CONFIGFS_ATTR(nvmet_subsys_, attr_model); static ssize_t nvmet_subsys_attr_ieee_oui_show(struct config_item *item, char *page) { struct nvmet_subsys *subsys = to_subsys(item); return sysfs_emit(page, "0x%06x\n", subsys->ieee_oui); } static ssize_t nvmet_subsys_attr_ieee_oui_store_locked(struct nvmet_subsys *subsys, const char *page, size_t count) { uint32_t val = 0; int ret; if (subsys->subsys_discovered) { pr_err("Can't set IEEE OUI. 0x%06x is already assigned\n", subsys->ieee_oui); return -EINVAL; } ret = kstrtou32(page, 0, &val); if (ret < 0) return ret; if (val >= 0x1000000) return -EINVAL; subsys->ieee_oui = val; return count; } static ssize_t nvmet_subsys_attr_ieee_oui_store(struct config_item *item, const char *page, size_t count) { struct nvmet_subsys *subsys = to_subsys(item); ssize_t ret; down_write(&nvmet_config_sem); mutex_lock(&subsys->lock); ret = nvmet_subsys_attr_ieee_oui_store_locked(subsys, page, count); mutex_unlock(&subsys->lock); up_write(&nvmet_config_sem); return ret; } CONFIGFS_ATTR(nvmet_subsys_, attr_ieee_oui); static ssize_t nvmet_subsys_attr_firmware_show(struct config_item *item, char *page) { struct nvmet_subsys *subsys = to_subsys(item); return sysfs_emit(page, "%s\n", subsys->firmware_rev); } static ssize_t nvmet_subsys_attr_firmware_store_locked(struct nvmet_subsys *subsys, const char *page, size_t count) { int pos = 0, len; char *val; if (subsys->subsys_discovered) { pr_err("Can't set firmware revision. 
%s is already assigned\n", subsys->firmware_rev); return -EINVAL; } len = strcspn(page, "\n"); if (!len) return -EINVAL; if (len > NVMET_FR_MAX_SIZE) { pr_err("Firmware revision size can not exceed %d Bytes\n", NVMET_FR_MAX_SIZE); return -EINVAL; } for (pos = 0; pos < len; pos++) { if (!nvmet_is_ascii(page[pos])) return -EINVAL; } val = kmemdup_nul(page, len, GFP_KERNEL); if (!val) return -ENOMEM; kfree(subsys->firmware_rev); subsys->firmware_rev = val; return count; } static ssize_t nvmet_subsys_attr_firmware_store(struct config_item *item, const char *page, size_t count) { struct nvmet_subsys *subsys = to_subsys(item); ssize_t ret; down_write(&nvmet_config_sem); mutex_lock(&subsys->lock); ret = nvmet_subsys_attr_firmware_store_locked(subsys, page, count); mutex_unlock(&subsys->lock); up_write(&nvmet_config_sem); return ret; } CONFIGFS_ATTR(nvmet_subsys_, attr_firmware); #ifdef CONFIG_BLK_DEV_INTEGRITY static ssize_t nvmet_subsys_attr_pi_enable_show(struct config_item *item, char *page) { return snprintf(page, PAGE_SIZE, "%d\n", to_subsys(item)->pi_support); } static ssize_t nvmet_subsys_attr_pi_enable_store(struct config_item *item, const char *page, size_t count) { struct nvmet_subsys *subsys = to_subsys(item); bool pi_enable; if (kstrtobool(page, &pi_enable)) return -EINVAL; subsys->pi_support = pi_enable; return count; } CONFIGFS_ATTR(nvmet_subsys_, attr_pi_enable); #endif static ssize_t nvmet_subsys_attr_qid_max_show(struct config_item *item, char *page) { return snprintf(page, PAGE_SIZE, "%u\n", to_subsys(item)->max_qid); } static ssize_t nvmet_subsys_attr_qid_max_store(struct config_item *item, const char *page, size_t cnt) { struct nvmet_subsys *subsys = to_subsys(item); struct nvmet_ctrl *ctrl; u16 qid_max; if (sscanf(page, "%hu\n", &qid_max) != 1) return -EINVAL; if (qid_max < 1 || qid_max > NVMET_NR_QUEUES) return -EINVAL; down_write(&nvmet_config_sem); subsys->max_qid = qid_max; /* Force reconnect */ list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry) ctrl->ops->delete_ctrl(ctrl); up_write(&nvmet_config_sem); return cnt; } CONFIGFS_ATTR(nvmet_subsys_, attr_qid_max); static struct configfs_attribute *nvmet_subsys_attrs[] = { &nvmet_subsys_attr_attr_allow_any_host, &nvmet_subsys_attr_attr_version, &nvmet_subsys_attr_attr_serial, &nvmet_subsys_attr_attr_cntlid_min, &nvmet_subsys_attr_attr_cntlid_max, &nvmet_subsys_attr_attr_model, &nvmet_subsys_attr_attr_qid_max, &nvmet_subsys_attr_attr_ieee_oui, &nvmet_subsys_attr_attr_firmware, #ifdef CONFIG_BLK_DEV_INTEGRITY &nvmet_subsys_attr_attr_pi_enable, #endif NULL, }; /* * Subsystem structures & folder operation functions below */ static void nvmet_subsys_release(struct config_item *item) { struct nvmet_subsys *subsys = to_subsys(item); nvmet_subsys_del_ctrls(subsys); nvmet_subsys_put(subsys); } static struct configfs_item_operations nvmet_subsys_item_ops = { .release = nvmet_subsys_release, }; static const struct config_item_type nvmet_subsys_type = { .ct_item_ops = &nvmet_subsys_item_ops, .ct_attrs = nvmet_subsys_attrs, .ct_owner = THIS_MODULE, }; static struct config_group *nvmet_subsys_make(struct config_group *group, const char *name) { struct nvmet_subsys *subsys; if (sysfs_streq(name, NVME_DISC_SUBSYS_NAME)) { pr_err("can't create discovery subsystem through configfs\n"); return ERR_PTR(-EINVAL); } subsys = nvmet_subsys_alloc(name, NVME_NQN_NVME); if (IS_ERR(subsys)) return ERR_CAST(subsys); config_group_init_type_name(&subsys->group, name, &nvmet_subsys_type); config_group_init_type_name(&subsys->namespaces_group, 
"namespaces", &nvmet_namespaces_type); configfs_add_default_group(&subsys->namespaces_group, &subsys->group); config_group_init_type_name(&subsys->allowed_hosts_group, "allowed_hosts", &nvmet_allowed_hosts_type); configfs_add_default_group(&subsys->allowed_hosts_group, &subsys->group); nvmet_add_passthru_group(subsys); return &subsys->group; } static struct configfs_group_operations nvmet_subsystems_group_ops = { .make_group = nvmet_subsys_make, }; static const struct config_item_type nvmet_subsystems_type = { .ct_group_ops = &nvmet_subsystems_group_ops, .ct_owner = THIS_MODULE, }; static ssize_t nvmet_referral_enable_show(struct config_item *item, char *page) { return snprintf(page, PAGE_SIZE, "%d\n", to_nvmet_port(item)->enabled); } static ssize_t nvmet_referral_enable_store(struct config_item *item, const char *page, size_t count) { struct nvmet_port *parent = to_nvmet_port(item->ci_parent->ci_parent); struct nvmet_port *port = to_nvmet_port(item); bool enable; if (kstrtobool(page, &enable)) goto inval; if (enable) nvmet_referral_enable(parent, port); else nvmet_referral_disable(parent, port); return count; inval: pr_err("Invalid value '%s' for enable\n", page); return -EINVAL; } CONFIGFS_ATTR(nvmet_referral_, enable); /* * Discovery Service subsystem definitions */ static struct configfs_attribute *nvmet_referral_attrs[] = { &nvmet_attr_addr_adrfam, &nvmet_attr_addr_portid, &nvmet_attr_addr_treq, &nvmet_attr_addr_traddr, &nvmet_attr_addr_trsvcid, &nvmet_attr_addr_trtype, &nvmet_referral_attr_enable, NULL, }; static void nvmet_referral_notify(struct config_group *group, struct config_item *item) { struct nvmet_port *parent = to_nvmet_port(item->ci_parent->ci_parent); struct nvmet_port *port = to_nvmet_port(item); nvmet_referral_disable(parent, port); } static void nvmet_referral_release(struct config_item *item) { struct nvmet_port *port = to_nvmet_port(item); kfree(port); } static struct configfs_item_operations nvmet_referral_item_ops = { .release = nvmet_referral_release, }; static const struct config_item_type nvmet_referral_type = { .ct_owner = THIS_MODULE, .ct_attrs = nvmet_referral_attrs, .ct_item_ops = &nvmet_referral_item_ops, }; static struct config_group *nvmet_referral_make( struct config_group *group, const char *name) { struct nvmet_port *port; port = kzalloc(sizeof(*port), GFP_KERNEL); if (!port) return ERR_PTR(-ENOMEM); INIT_LIST_HEAD(&port->entry); config_group_init_type_name(&port->group, name, &nvmet_referral_type); return &port->group; } static struct configfs_group_operations nvmet_referral_group_ops = { .make_group = nvmet_referral_make, .disconnect_notify = nvmet_referral_notify, }; static const struct config_item_type nvmet_referrals_type = { .ct_owner = THIS_MODULE, .ct_group_ops = &nvmet_referral_group_ops, }; static struct nvmet_type_name_map nvmet_ana_state[] = { { NVME_ANA_OPTIMIZED, "optimized" }, { NVME_ANA_NONOPTIMIZED, "non-optimized" }, { NVME_ANA_INACCESSIBLE, "inaccessible" }, { NVME_ANA_PERSISTENT_LOSS, "persistent-loss" }, { NVME_ANA_CHANGE, "change" }, }; static ssize_t nvmet_ana_group_ana_state_show(struct config_item *item, char *page) { struct nvmet_ana_group *grp = to_ana_group(item); enum nvme_ana_state state = grp->port->ana_state[grp->grpid]; int i; for (i = 0; i < ARRAY_SIZE(nvmet_ana_state); i++) { if (state == nvmet_ana_state[i].type) return sprintf(page, "%s\n", nvmet_ana_state[i].name); } return sprintf(page, "\n"); } static ssize_t nvmet_ana_group_ana_state_store(struct config_item *item, const char *page, size_t count) { struct 
nvmet_ana_group *grp = to_ana_group(item); enum nvme_ana_state *ana_state = grp->port->ana_state; int i; for (i = 0; i < ARRAY_SIZE(nvmet_ana_state); i++) { if (sysfs_streq(page, nvmet_ana_state[i].name)) goto found; } pr_err("Invalid value '%s' for ana_state\n", page); return -EINVAL; found: down_write(&nvmet_ana_sem); ana_state[grp->grpid] = (enum nvme_ana_state) nvmet_ana_state[i].type; nvmet_ana_chgcnt++; up_write(&nvmet_ana_sem); nvmet_port_send_ana_event(grp->port); return count; } CONFIGFS_ATTR(nvmet_ana_group_, ana_state); static struct configfs_attribute *nvmet_ana_group_attrs[] = { &nvmet_ana_group_attr_ana_state, NULL, }; static void nvmet_ana_group_release(struct config_item *item) { struct nvmet_ana_group *grp = to_ana_group(item); if (grp == &grp->port->ana_default_group) return; down_write(&nvmet_ana_sem); grp->port->ana_state[grp->grpid] = NVME_ANA_INACCESSIBLE; nvmet_ana_group_enabled[grp->grpid]--; up_write(&nvmet_ana_sem); nvmet_port_send_ana_event(grp->port); kfree(grp); } static struct configfs_item_operations nvmet_ana_group_item_ops = { .release = nvmet_ana_group_release, }; static const struct config_item_type nvmet_ana_group_type = { .ct_item_ops = &nvmet_ana_group_item_ops, .ct_attrs = nvmet_ana_group_attrs, .ct_owner = THIS_MODULE, }; static struct config_group *nvmet_ana_groups_make_group( struct config_group *group, const char *name) { struct nvmet_port *port = ana_groups_to_port(&group->cg_item); struct nvmet_ana_group *grp; u32 grpid; int ret; ret = kstrtou32(name, 0, &grpid); if (ret) goto out; ret = -EINVAL; if (grpid <= 1 || grpid > NVMET_MAX_ANAGRPS) goto out; ret = -ENOMEM; grp = kzalloc(sizeof(*grp), GFP_KERNEL); if (!grp) goto out; grp->port = port; grp->grpid = grpid; down_write(&nvmet_ana_sem); nvmet_ana_group_enabled[grpid]++; up_write(&nvmet_ana_sem); nvmet_port_send_ana_event(grp->port); config_group_init_type_name(&grp->group, name, &nvmet_ana_group_type); return &grp->group; out: return ERR_PTR(ret); } static struct configfs_group_operations nvmet_ana_groups_group_ops = { .make_group = nvmet_ana_groups_make_group, }; static const struct config_item_type nvmet_ana_groups_type = { .ct_group_ops = &nvmet_ana_groups_group_ops, .ct_owner = THIS_MODULE, }; /* * Ports definitions. 
*/ static void nvmet_port_release(struct config_item *item) { struct nvmet_port *port = to_nvmet_port(item); /* Let inflight controllers teardown complete */ flush_workqueue(nvmet_wq); list_del(&port->global_entry); kfree(port->ana_state); kfree(port); } static struct configfs_attribute *nvmet_port_attrs[] = { &nvmet_attr_addr_adrfam, &nvmet_attr_addr_treq, &nvmet_attr_addr_traddr, &nvmet_attr_addr_trsvcid, &nvmet_attr_addr_trtype, &nvmet_attr_param_inline_data_size, #ifdef CONFIG_BLK_DEV_INTEGRITY &nvmet_attr_param_pi_enable, #endif NULL, }; static struct configfs_item_operations nvmet_port_item_ops = { .release = nvmet_port_release, }; static const struct config_item_type nvmet_port_type = { .ct_attrs = nvmet_port_attrs, .ct_item_ops = &nvmet_port_item_ops, .ct_owner = THIS_MODULE, }; static struct config_group *nvmet_ports_make(struct config_group *group, const char *name) { struct nvmet_port *port; u16 portid; u32 i; if (kstrtou16(name, 0, &portid)) return ERR_PTR(-EINVAL); port = kzalloc(sizeof(*port), GFP_KERNEL); if (!port) return ERR_PTR(-ENOMEM); port->ana_state = kcalloc(NVMET_MAX_ANAGRPS + 1, sizeof(*port->ana_state), GFP_KERNEL); if (!port->ana_state) { kfree(port); return ERR_PTR(-ENOMEM); } for (i = 1; i <= NVMET_MAX_ANAGRPS; i++) { if (i == NVMET_DEFAULT_ANA_GRPID) port->ana_state[1] = NVME_ANA_OPTIMIZED; else port->ana_state[i] = NVME_ANA_INACCESSIBLE; } list_add(&port->global_entry, &nvmet_ports_list); INIT_LIST_HEAD(&port->entry); INIT_LIST_HEAD(&port->subsystems); INIT_LIST_HEAD(&port->referrals); port->inline_data_size = -1; /* < 0 == let the transport choose */ port->disc_addr.portid = cpu_to_le16(portid); port->disc_addr.adrfam = NVMF_ADDR_FAMILY_MAX; port->disc_addr.treq = NVMF_TREQ_DISABLE_SQFLOW; config_group_init_type_name(&port->group, name, &nvmet_port_type); config_group_init_type_name(&port->subsys_group, "subsystems", &nvmet_port_subsys_type); configfs_add_default_group(&port->subsys_group, &port->group); config_group_init_type_name(&port->referrals_group, "referrals", &nvmet_referrals_type); configfs_add_default_group(&port->referrals_group, &port->group); config_group_init_type_name(&port->ana_groups_group, "ana_groups", &nvmet_ana_groups_type); configfs_add_default_group(&port->ana_groups_group, &port->group); port->ana_default_group.port = port; port->ana_default_group.grpid = NVMET_DEFAULT_ANA_GRPID; config_group_init_type_name(&port->ana_default_group.group, __stringify(NVMET_DEFAULT_ANA_GRPID), &nvmet_ana_group_type); configfs_add_default_group(&port->ana_default_group.group, &port->ana_groups_group); return &port->group; } static struct configfs_group_operations nvmet_ports_group_ops = { .make_group = nvmet_ports_make, }; static const struct config_item_type nvmet_ports_type = { .ct_group_ops = &nvmet_ports_group_ops, .ct_owner = THIS_MODULE, }; static struct config_group nvmet_subsystems_group; static struct config_group nvmet_ports_group; #ifdef CONFIG_NVME_TARGET_AUTH static ssize_t nvmet_host_dhchap_key_show(struct config_item *item, char *page) { u8 *dhchap_secret = to_host(item)->dhchap_secret; if (!dhchap_secret) return sprintf(page, "\n"); return sprintf(page, "%s\n", dhchap_secret); } static ssize_t nvmet_host_dhchap_key_store(struct config_item *item, const char *page, size_t count) { struct nvmet_host *host = to_host(item); int ret; ret = nvmet_auth_set_key(host, page, false); /* * Re-authentication is a soft state, so keep the * current authentication valid until the host * requests re-authentication. */ return ret < 0 ? 
ret : count; } CONFIGFS_ATTR(nvmet_host_, dhchap_key); static ssize_t nvmet_host_dhchap_ctrl_key_show(struct config_item *item, char *page) { u8 *dhchap_secret = to_host(item)->dhchap_ctrl_secret; if (!dhchap_secret) return sprintf(page, "\n"); return sprintf(page, "%s\n", dhchap_secret); } static ssize_t nvmet_host_dhchap_ctrl_key_store(struct config_item *item, const char *page, size_t count) { struct nvmet_host *host = to_host(item); int ret; ret = nvmet_auth_set_key(host, page, true); /* * Re-authentication is a soft state, so keep the * current authentication valid until the host * requests re-authentication. */ return ret < 0 ? ret : count; } CONFIGFS_ATTR(nvmet_host_, dhchap_ctrl_key); static ssize_t nvmet_host_dhchap_hash_show(struct config_item *item, char *page) { struct nvmet_host *host = to_host(item); const char *hash_name = nvme_auth_hmac_name(host->dhchap_hash_id); return sprintf(page, "%s\n", hash_name ? hash_name : "none"); } static ssize_t nvmet_host_dhchap_hash_store(struct config_item *item, const char *page, size_t count) { struct nvmet_host *host = to_host(item); u8 hmac_id; hmac_id = nvme_auth_hmac_id(page); if (hmac_id == NVME_AUTH_HASH_INVALID) return -EINVAL; if (!crypto_has_shash(nvme_auth_hmac_name(hmac_id), 0, 0)) return -ENOTSUPP; host->dhchap_hash_id = hmac_id; return count; } CONFIGFS_ATTR(nvmet_host_, dhchap_hash); static ssize_t nvmet_host_dhchap_dhgroup_show(struct config_item *item, char *page) { struct nvmet_host *host = to_host(item); const char *dhgroup = nvme_auth_dhgroup_name(host->dhchap_dhgroup_id); return sprintf(page, "%s\n", dhgroup ? dhgroup : "none"); } static ssize_t nvmet_host_dhchap_dhgroup_store(struct config_item *item, const char *page, size_t count) { struct nvmet_host *host = to_host(item); int dhgroup_id; dhgroup_id = nvme_auth_dhgroup_id(page); if (dhgroup_id == NVME_AUTH_DHGROUP_INVALID) return -EINVAL; if (dhgroup_id != NVME_AUTH_DHGROUP_NULL) { const char *kpp = nvme_auth_dhgroup_kpp(dhgroup_id); if (!crypto_has_kpp(kpp, 0, 0)) return -EINVAL; } host->dhchap_dhgroup_id = dhgroup_id; return count; } CONFIGFS_ATTR(nvmet_host_, dhchap_dhgroup); static struct configfs_attribute *nvmet_host_attrs[] = { &nvmet_host_attr_dhchap_key, &nvmet_host_attr_dhchap_ctrl_key, &nvmet_host_attr_dhchap_hash, &nvmet_host_attr_dhchap_dhgroup, NULL, }; #endif /* CONFIG_NVME_TARGET_AUTH */ static void nvmet_host_release(struct config_item *item) { struct nvmet_host *host = to_host(item); #ifdef CONFIG_NVME_TARGET_AUTH kfree(host->dhchap_secret); kfree(host->dhchap_ctrl_secret); #endif kfree(host); } static struct configfs_item_operations nvmet_host_item_ops = { .release = nvmet_host_release, }; static const struct config_item_type nvmet_host_type = { .ct_item_ops = &nvmet_host_item_ops, #ifdef CONFIG_NVME_TARGET_AUTH .ct_attrs = nvmet_host_attrs, #endif .ct_owner = THIS_MODULE, }; static struct config_group *nvmet_hosts_make_group(struct config_group *group, const char *name) { struct nvmet_host *host; host = kzalloc(sizeof(*host), GFP_KERNEL); if (!host) return ERR_PTR(-ENOMEM); #ifdef CONFIG_NVME_TARGET_AUTH /* Default to SHA256 */ host->dhchap_hash_id = NVME_AUTH_HASH_SHA256; #endif config_group_init_type_name(&host->group, name, &nvmet_host_type); return &host->group; } static struct configfs_group_operations nvmet_hosts_group_ops = { .make_group = nvmet_hosts_make_group, }; static const struct config_item_type nvmet_hosts_type = { .ct_group_ops = &nvmet_hosts_group_ops, .ct_owner = THIS_MODULE, }; static struct config_group nvmet_hosts_group; 
static const struct config_item_type nvmet_root_type = { .ct_owner = THIS_MODULE, }; static struct configfs_subsystem nvmet_configfs_subsystem = { .su_group = { .cg_item = { .ci_namebuf = "nvmet", .ci_type = &nvmet_root_type, }, }, }; int __init nvmet_init_configfs(void) { int ret; config_group_init(&nvmet_configfs_subsystem.su_group); mutex_init(&nvmet_configfs_subsystem.su_mutex); config_group_init_type_name(&nvmet_subsystems_group, "subsystems", &nvmet_subsystems_type); configfs_add_default_group(&nvmet_subsystems_group, &nvmet_configfs_subsystem.su_group); config_group_init_type_name(&nvmet_ports_group, "ports", &nvmet_ports_type); configfs_add_default_group(&nvmet_ports_group, &nvmet_configfs_subsystem.su_group); config_group_init_type_name(&nvmet_hosts_group, "hosts", &nvmet_hosts_type); configfs_add_default_group(&nvmet_hosts_group, &nvmet_configfs_subsystem.su_group); ret = configfs_register_subsystem(&nvmet_configfs_subsystem); if (ret) { pr_err("configfs_register_subsystem: %d\n", ret); return ret; } return 0; } void __exit nvmet_exit_configfs(void) { configfs_unregister_subsystem(&nvmet_configfs_subsystem); }
linux-master
drivers/nvme/target/configfs.c
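/*
 * Editor's note - an illustrative sketch, not part of the kernel sources
 * above or below.  drivers/nvme/target/configfs.c (above) builds the
 * /sys/kernel/config/nvmet/ tree; creating a port with "mkdir ports/1"
 * runs nvmet_ports_make(), which yields roughly this layout:
 *
 *   ports/1/
 *     addr_adrfam  addr_traddr  addr_treq  addr_trsvcid  addr_trtype
 *     param_inline_data_size
 *     subsystems/        <- links to exported subsystems
 *     referrals/         <- discovery referrals
 *     ana_groups/1/      <- default ANA group, created automatically
 *
 * The standalone demo below mirrors how the port's ANA state table is
 * seeded: only the default group starts optimized, everything else is
 * inaccessible until configured.  The two constants are assumptions
 * standing in for NVMET_MAX_ANAGRPS and NVMET_DEFAULT_ANA_GRPID.
 */
#include <stdio.h>

#define DEMO_MAX_ANAGRPS	128	/* assumed NVMET_MAX_ANAGRPS */
#define DEMO_DEFAULT_GRPID	1	/* assumed NVMET_DEFAULT_ANA_GRPID */

int main(void)
{
	const char *ana_state[DEMO_MAX_ANAGRPS + 1];
	unsigned int i;

	/* group IDs are 1-based; index 0 stays unused, as in the kernel */
	for (i = 1; i <= DEMO_MAX_ANAGRPS; i++)
		ana_state[i] = (i == DEMO_DEFAULT_GRPID) ?
			"optimized" : "inaccessible";

	printf("ana group 1: %s, ana group 2: %s\n",
	       ana_state[1], ana_state[2]);
	return 0;
}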
// SPDX-License-Identifier: GPL-2.0 /* * NVMe Fabrics command implementation. * Copyright (c) 2015-2016 HGST, a Western Digital Company. */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/blkdev.h> #include "nvmet.h" static void nvmet_execute_prop_set(struct nvmet_req *req) { u64 val = le64_to_cpu(req->cmd->prop_set.value); u16 status = 0; if (!nvmet_check_transfer_len(req, 0)) return; if (req->cmd->prop_set.attrib & 1) { req->error_loc = offsetof(struct nvmf_property_set_command, attrib); status = NVME_SC_INVALID_FIELD | NVME_SC_DNR; goto out; } switch (le32_to_cpu(req->cmd->prop_set.offset)) { case NVME_REG_CC: nvmet_update_cc(req->sq->ctrl, val); break; default: req->error_loc = offsetof(struct nvmf_property_set_command, offset); status = NVME_SC_INVALID_FIELD | NVME_SC_DNR; } out: nvmet_req_complete(req, status); } static void nvmet_execute_prop_get(struct nvmet_req *req) { struct nvmet_ctrl *ctrl = req->sq->ctrl; u16 status = 0; u64 val = 0; if (!nvmet_check_transfer_len(req, 0)) return; if (req->cmd->prop_get.attrib & 1) { switch (le32_to_cpu(req->cmd->prop_get.offset)) { case NVME_REG_CAP: val = ctrl->cap; break; default: status = NVME_SC_INVALID_FIELD | NVME_SC_DNR; break; } } else { switch (le32_to_cpu(req->cmd->prop_get.offset)) { case NVME_REG_VS: val = ctrl->subsys->ver; break; case NVME_REG_CC: val = ctrl->cc; break; case NVME_REG_CSTS: val = ctrl->csts; break; default: status = NVME_SC_INVALID_FIELD | NVME_SC_DNR; break; } } if (status && req->cmd->prop_get.attrib & 1) { req->error_loc = offsetof(struct nvmf_property_get_command, offset); } else { req->error_loc = offsetof(struct nvmf_property_get_command, attrib); } req->cqe->result.u64 = cpu_to_le64(val); nvmet_req_complete(req, status); } u16 nvmet_parse_fabrics_admin_cmd(struct nvmet_req *req) { struct nvme_command *cmd = req->cmd; switch (cmd->fabrics.fctype) { case nvme_fabrics_type_property_set: req->execute = nvmet_execute_prop_set; break; case nvme_fabrics_type_property_get: req->execute = nvmet_execute_prop_get; break; #ifdef CONFIG_NVME_TARGET_AUTH case nvme_fabrics_type_auth_send: req->execute = nvmet_execute_auth_send; break; case nvme_fabrics_type_auth_receive: req->execute = nvmet_execute_auth_receive; break; #endif default: pr_debug("received unknown capsule type 0x%x\n", cmd->fabrics.fctype); req->error_loc = offsetof(struct nvmf_common_command, fctype); return NVME_SC_INVALID_OPCODE | NVME_SC_DNR; } return 0; } u16 nvmet_parse_fabrics_io_cmd(struct nvmet_req *req) { struct nvme_command *cmd = req->cmd; switch (cmd->fabrics.fctype) { #ifdef CONFIG_NVME_TARGET_AUTH case nvme_fabrics_type_auth_send: req->execute = nvmet_execute_auth_send; break; case nvme_fabrics_type_auth_receive: req->execute = nvmet_execute_auth_receive; break; #endif default: pr_debug("received unknown capsule type 0x%x\n", cmd->fabrics.fctype); req->error_loc = offsetof(struct nvmf_common_command, fctype); return NVME_SC_INVALID_OPCODE | NVME_SC_DNR; } return 0; } static u16 nvmet_install_queue(struct nvmet_ctrl *ctrl, struct nvmet_req *req) { struct nvmf_connect_command *c = &req->cmd->connect; u16 qid = le16_to_cpu(c->qid); u16 sqsize = le16_to_cpu(c->sqsize); struct nvmet_ctrl *old; u16 mqes = NVME_CAP_MQES(ctrl->cap); u16 ret; if (!sqsize) { pr_warn("queue size zero!\n"); req->error_loc = offsetof(struct nvmf_connect_command, sqsize); req->cqe->result.u32 = IPO_IATTR_CONNECT_SQE(sqsize); ret = NVME_SC_CONNECT_INVALID_PARAM | NVME_SC_DNR; goto err; } if (ctrl->sqs[qid] != NULL) { pr_warn("qid %u has already been created\n", 
qid); req->error_loc = offsetof(struct nvmf_connect_command, qid); return NVME_SC_CMD_SEQ_ERROR | NVME_SC_DNR; } if (sqsize > mqes) { pr_warn("sqsize %u is larger than MQES supported %u cntlid %d\n", sqsize, mqes, ctrl->cntlid); req->error_loc = offsetof(struct nvmf_connect_command, sqsize); req->cqe->result.u32 = IPO_IATTR_CONNECT_SQE(sqsize); return NVME_SC_CONNECT_INVALID_PARAM | NVME_SC_DNR; } old = cmpxchg(&req->sq->ctrl, NULL, ctrl); if (old) { pr_warn("queue already connected!\n"); req->error_loc = offsetof(struct nvmf_connect_command, opcode); return NVME_SC_CONNECT_CTRL_BUSY | NVME_SC_DNR; } /* note: convert queue size from 0's-based value to 1's-based value */ nvmet_cq_setup(ctrl, req->cq, qid, sqsize + 1); nvmet_sq_setup(ctrl, req->sq, qid, sqsize + 1); if (c->cattr & NVME_CONNECT_DISABLE_SQFLOW) { req->sq->sqhd_disabled = true; req->cqe->sq_head = cpu_to_le16(0xffff); } if (ctrl->ops->install_queue) { ret = ctrl->ops->install_queue(req->sq); if (ret) { pr_err("failed to install queue %d cntlid %d ret %x\n", qid, ctrl->cntlid, ret); ctrl->sqs[qid] = NULL; goto err; } } return 0; err: req->sq->ctrl = NULL; return ret; } static u32 nvmet_connect_result(struct nvmet_ctrl *ctrl) { return (u32)ctrl->cntlid | (nvmet_has_auth(ctrl) ? NVME_CONNECT_AUTHREQ_ATR : 0); } static void nvmet_execute_admin_connect(struct nvmet_req *req) { struct nvmf_connect_command *c = &req->cmd->connect; struct nvmf_connect_data *d; struct nvmet_ctrl *ctrl = NULL; u16 status = 0; int ret; if (!nvmet_check_transfer_len(req, sizeof(struct nvmf_connect_data))) return; d = kmalloc(sizeof(*d), GFP_KERNEL); if (!d) { status = NVME_SC_INTERNAL; goto complete; } status = nvmet_copy_from_sgl(req, 0, d, sizeof(*d)); if (status) goto out; /* zero out initial completion result, assign values as needed */ req->cqe->result.u32 = 0; if (c->recfmt != 0) { pr_warn("invalid connect version (%d).\n", le16_to_cpu(c->recfmt)); req->error_loc = offsetof(struct nvmf_connect_command, recfmt); status = NVME_SC_CONNECT_FORMAT | NVME_SC_DNR; goto out; } if (unlikely(d->cntlid != cpu_to_le16(0xffff))) { pr_warn("connect attempt for invalid controller ID %#x\n", d->cntlid); status = NVME_SC_CONNECT_INVALID_PARAM | NVME_SC_DNR; req->cqe->result.u32 = IPO_IATTR_CONNECT_DATA(cntlid); goto out; } status = nvmet_alloc_ctrl(d->subsysnqn, d->hostnqn, req, le32_to_cpu(c->kato), &ctrl); if (status) goto out; ctrl->pi_support = ctrl->port->pi_enable && ctrl->subsys->pi_support; uuid_copy(&ctrl->hostid, &d->hostid); ret = nvmet_setup_auth(ctrl); if (ret < 0) { pr_err("Failed to setup authentication, error %d\n", ret); nvmet_ctrl_put(ctrl); if (ret == -EPERM) status = (NVME_SC_CONNECT_INVALID_HOST | NVME_SC_DNR); else status = NVME_SC_INTERNAL; goto out; } status = nvmet_install_queue(ctrl, req); if (status) { nvmet_ctrl_put(ctrl); goto out; } pr_info("creating %s controller %d for subsystem %s for NQN %s%s%s.\n", nvmet_is_disc_subsys(ctrl->subsys) ? "discovery" : "nvm", ctrl->cntlid, ctrl->subsys->subsysnqn, ctrl->hostnqn, ctrl->pi_support ? " T10-PI is enabled" : "", nvmet_has_auth(ctrl) ? 
" with DH-HMAC-CHAP" : ""); req->cqe->result.u32 = cpu_to_le32(nvmet_connect_result(ctrl)); out: kfree(d); complete: nvmet_req_complete(req, status); } static void nvmet_execute_io_connect(struct nvmet_req *req) { struct nvmf_connect_command *c = &req->cmd->connect; struct nvmf_connect_data *d; struct nvmet_ctrl *ctrl; u16 qid = le16_to_cpu(c->qid); u16 status = 0; if (!nvmet_check_transfer_len(req, sizeof(struct nvmf_connect_data))) return; d = kmalloc(sizeof(*d), GFP_KERNEL); if (!d) { status = NVME_SC_INTERNAL; goto complete; } status = nvmet_copy_from_sgl(req, 0, d, sizeof(*d)); if (status) goto out; /* zero out initial completion result, assign values as needed */ req->cqe->result.u32 = 0; if (c->recfmt != 0) { pr_warn("invalid connect version (%d).\n", le16_to_cpu(c->recfmt)); status = NVME_SC_CONNECT_FORMAT | NVME_SC_DNR; goto out; } ctrl = nvmet_ctrl_find_get(d->subsysnqn, d->hostnqn, le16_to_cpu(d->cntlid), req); if (!ctrl) { status = NVME_SC_CONNECT_INVALID_PARAM | NVME_SC_DNR; goto out; } if (unlikely(qid > ctrl->subsys->max_qid)) { pr_warn("invalid queue id (%d)\n", qid); status = NVME_SC_CONNECT_INVALID_PARAM | NVME_SC_DNR; req->cqe->result.u32 = IPO_IATTR_CONNECT_SQE(qid); goto out_ctrl_put; } status = nvmet_install_queue(ctrl, req); if (status) goto out_ctrl_put; pr_debug("adding queue %d to ctrl %d.\n", qid, ctrl->cntlid); req->cqe->result.u32 = cpu_to_le32(nvmet_connect_result(ctrl)); out: kfree(d); complete: nvmet_req_complete(req, status); return; out_ctrl_put: nvmet_ctrl_put(ctrl); goto out; } u16 nvmet_parse_connect_cmd(struct nvmet_req *req) { struct nvme_command *cmd = req->cmd; if (!nvme_is_fabrics(cmd)) { pr_debug("invalid command 0x%x on unconnected queue.\n", cmd->fabrics.opcode); req->error_loc = offsetof(struct nvme_common_command, opcode); return NVME_SC_INVALID_OPCODE | NVME_SC_DNR; } if (cmd->fabrics.fctype != nvme_fabrics_type_connect) { pr_debug("invalid capsule type 0x%x on unconnected queue.\n", cmd->fabrics.fctype); req->error_loc = offsetof(struct nvmf_common_command, fctype); return NVME_SC_INVALID_OPCODE | NVME_SC_DNR; } if (cmd->connect.qid == 0) req->execute = nvmet_execute_admin_connect; else req->execute = nvmet_execute_io_connect; return 0; }
linux-master
drivers/nvme/target/fabrics-cmd.c
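/*
 * Editor's note - an illustrative sketch, not part of the kernel sources.
 * Two details of the Connect handling in drivers/nvme/target/fabrics-cmd.c
 * (above), reduced to plain arithmetic: the wire sqsize is 0's-based, so
 * nvmet_install_queue() sizes the queue as sqsize + 1, and
 * nvmet_connect_result() returns the controller ID in the low 16 bits of
 * the completion dword with an "authentication required" flag OR'd in when
 * DH-HMAC-CHAP is configured.  DEMO_AUTHREQ_ATR below is a stand-in bit,
 * not copied from the real NVME_CONNECT_AUTHREQ_ATR definition.
 */
#include <stdio.h>
#include <stdint.h>

#define DEMO_AUTHREQ_ATR	(1u << 18)	/* stand-in flag bit */

int main(void)
{
	uint16_t wire_sqsize = 127;	/* host asks for a 128-entry SQ */
	uint16_t cntlid = 0x4d2;
	uint32_t result = (uint32_t)cntlid | DEMO_AUTHREQ_ATR;

	printf("queue entries = %u\n", wire_sqsize + 1);
	printf("cntlid = %#x, auth required = %d\n",
	       (unsigned int)(result & 0xffff),
	       !!(result & DEMO_AUTHREQ_ATR));
	return 0;
}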
// SPDX-License-Identifier: GPL-2.0 /* * Discovery service for the NVMe over Fabrics target. * Copyright (C) 2016 Intel Corporation. All rights reserved. */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/slab.h> #include <generated/utsrelease.h> #include "nvmet.h" struct nvmet_subsys *nvmet_disc_subsys; static u64 nvmet_genctr; static void __nvmet_disc_changed(struct nvmet_port *port, struct nvmet_ctrl *ctrl) { if (ctrl->port != port) return; if (nvmet_aen_bit_disabled(ctrl, NVME_AEN_BIT_DISC_CHANGE)) return; nvmet_add_async_event(ctrl, NVME_AER_TYPE_NOTICE, NVME_AER_NOTICE_DISC_CHANGED, NVME_LOG_DISC); } void nvmet_port_disc_changed(struct nvmet_port *port, struct nvmet_subsys *subsys) { struct nvmet_ctrl *ctrl; lockdep_assert_held(&nvmet_config_sem); nvmet_genctr++; mutex_lock(&nvmet_disc_subsys->lock); list_for_each_entry(ctrl, &nvmet_disc_subsys->ctrls, subsys_entry) { if (subsys && !nvmet_host_allowed(subsys, ctrl->hostnqn)) continue; __nvmet_disc_changed(port, ctrl); } mutex_unlock(&nvmet_disc_subsys->lock); /* If transport can signal change, notify transport */ if (port->tr_ops && port->tr_ops->discovery_chg) port->tr_ops->discovery_chg(port); } static void __nvmet_subsys_disc_changed(struct nvmet_port *port, struct nvmet_subsys *subsys, struct nvmet_host *host) { struct nvmet_ctrl *ctrl; mutex_lock(&nvmet_disc_subsys->lock); list_for_each_entry(ctrl, &nvmet_disc_subsys->ctrls, subsys_entry) { if (host && strcmp(nvmet_host_name(host), ctrl->hostnqn)) continue; __nvmet_disc_changed(port, ctrl); } mutex_unlock(&nvmet_disc_subsys->lock); } void nvmet_subsys_disc_changed(struct nvmet_subsys *subsys, struct nvmet_host *host) { struct nvmet_port *port; struct nvmet_subsys_link *s; lockdep_assert_held(&nvmet_config_sem); nvmet_genctr++; list_for_each_entry(port, nvmet_ports, global_entry) list_for_each_entry(s, &port->subsystems, entry) { if (s->subsys != subsys) continue; __nvmet_subsys_disc_changed(port, subsys, host); } } void nvmet_referral_enable(struct nvmet_port *parent, struct nvmet_port *port) { down_write(&nvmet_config_sem); if (list_empty(&port->entry)) { list_add_tail(&port->entry, &parent->referrals); port->enabled = true; nvmet_port_disc_changed(parent, NULL); } up_write(&nvmet_config_sem); } void nvmet_referral_disable(struct nvmet_port *parent, struct nvmet_port *port) { down_write(&nvmet_config_sem); if (!list_empty(&port->entry)) { port->enabled = false; list_del_init(&port->entry); nvmet_port_disc_changed(parent, NULL); } up_write(&nvmet_config_sem); } static void nvmet_format_discovery_entry(struct nvmf_disc_rsp_page_hdr *hdr, struct nvmet_port *port, char *subsys_nqn, char *traddr, u8 type, u32 numrec) { struct nvmf_disc_rsp_page_entry *e = &hdr->entries[numrec]; e->trtype = port->disc_addr.trtype; e->adrfam = port->disc_addr.adrfam; e->treq = port->disc_addr.treq; e->portid = port->disc_addr.portid; /* we support only dynamic controllers */ e->cntlid = cpu_to_le16(NVME_CNTLID_DYNAMIC); e->asqsz = cpu_to_le16(NVME_AQ_DEPTH); e->subtype = type; memcpy(e->trsvcid, port->disc_addr.trsvcid, NVMF_TRSVCID_SIZE); memcpy(e->traddr, traddr, NVMF_TRADDR_SIZE); memcpy(e->tsas.common, port->disc_addr.tsas.common, NVMF_TSAS_SIZE); strncpy(e->subnqn, subsys_nqn, NVMF_NQN_SIZE); } /* * nvmet_set_disc_traddr - set a correct discovery log entry traddr * * IP based transports (e.g RDMA) can listen on "any" ipv4/ipv6 addresses * (INADDR_ANY or IN6ADDR_ANY_INIT). The discovery log page traddr reply * must not contain that "any" IP address. 
If the transport implements * .disc_traddr, use it. this callback will set the discovery traddr * from the req->port address in case the port in question listens * "any" IP address. */ static void nvmet_set_disc_traddr(struct nvmet_req *req, struct nvmet_port *port, char *traddr) { if (req->ops->disc_traddr) req->ops->disc_traddr(req, port, traddr); else memcpy(traddr, port->disc_addr.traddr, NVMF_TRADDR_SIZE); } static size_t discovery_log_entries(struct nvmet_req *req) { struct nvmet_ctrl *ctrl = req->sq->ctrl; struct nvmet_subsys_link *p; struct nvmet_port *r; size_t entries = 1; list_for_each_entry(p, &req->port->subsystems, entry) { if (!nvmet_host_allowed(p->subsys, ctrl->hostnqn)) continue; entries++; } list_for_each_entry(r, &req->port->referrals, entry) entries++; return entries; } static void nvmet_execute_disc_get_log_page(struct nvmet_req *req) { const int entry_size = sizeof(struct nvmf_disc_rsp_page_entry); struct nvmet_ctrl *ctrl = req->sq->ctrl; struct nvmf_disc_rsp_page_hdr *hdr; u64 offset = nvmet_get_log_page_offset(req->cmd); size_t data_len = nvmet_get_log_page_len(req->cmd); size_t alloc_len; struct nvmet_subsys_link *p; struct nvmet_port *r; u32 numrec = 0; u16 status = 0; void *buffer; char traddr[NVMF_TRADDR_SIZE]; if (!nvmet_check_transfer_len(req, data_len)) return; if (req->cmd->get_log_page.lid != NVME_LOG_DISC) { req->error_loc = offsetof(struct nvme_get_log_page_command, lid); status = NVME_SC_INVALID_FIELD | NVME_SC_DNR; goto out; } /* Spec requires dword aligned offsets */ if (offset & 0x3) { req->error_loc = offsetof(struct nvme_get_log_page_command, lpo); status = NVME_SC_INVALID_FIELD | NVME_SC_DNR; goto out; } /* * Make sure we're passing at least a buffer of response header size. * If host provided data len is less than the header size, only the * number of bytes requested by host will be sent to host. 
*/ down_read(&nvmet_config_sem); alloc_len = sizeof(*hdr) + entry_size * discovery_log_entries(req); buffer = kzalloc(alloc_len, GFP_KERNEL); if (!buffer) { up_read(&nvmet_config_sem); status = NVME_SC_INTERNAL; goto out; } hdr = buffer; nvmet_set_disc_traddr(req, req->port, traddr); nvmet_format_discovery_entry(hdr, req->port, nvmet_disc_subsys->subsysnqn, traddr, NVME_NQN_CURR, numrec); numrec++; list_for_each_entry(p, &req->port->subsystems, entry) { if (!nvmet_host_allowed(p->subsys, ctrl->hostnqn)) continue; nvmet_format_discovery_entry(hdr, req->port, p->subsys->subsysnqn, traddr, NVME_NQN_NVME, numrec); numrec++; } list_for_each_entry(r, &req->port->referrals, entry) { nvmet_format_discovery_entry(hdr, r, NVME_DISC_SUBSYS_NAME, r->disc_addr.traddr, NVME_NQN_DISC, numrec); numrec++; } hdr->genctr = cpu_to_le64(nvmet_genctr); hdr->numrec = cpu_to_le64(numrec); hdr->recfmt = cpu_to_le16(0); nvmet_clear_aen_bit(req, NVME_AEN_BIT_DISC_CHANGE); up_read(&nvmet_config_sem); status = nvmet_copy_to_sgl(req, 0, buffer + offset, data_len); kfree(buffer); out: nvmet_req_complete(req, status); } static void nvmet_execute_disc_identify(struct nvmet_req *req) { struct nvmet_ctrl *ctrl = req->sq->ctrl; struct nvme_id_ctrl *id; u16 status = 0; if (!nvmet_check_transfer_len(req, NVME_IDENTIFY_DATA_SIZE)) return; if (req->cmd->identify.cns != NVME_ID_CNS_CTRL) { req->error_loc = offsetof(struct nvme_identify, cns); status = NVME_SC_INVALID_FIELD | NVME_SC_DNR; goto out; } id = kzalloc(sizeof(*id), GFP_KERNEL); if (!id) { status = NVME_SC_INTERNAL; goto out; } memcpy(id->sn, ctrl->subsys->serial, NVMET_SN_MAX_SIZE); memset(id->fr, ' ', sizeof(id->fr)); memcpy_and_pad(id->mn, sizeof(id->mn), ctrl->subsys->model_number, strlen(ctrl->subsys->model_number), ' '); memcpy_and_pad(id->fr, sizeof(id->fr), UTS_RELEASE, strlen(UTS_RELEASE), ' '); id->cntrltype = NVME_CTRL_DISC; /* no limit on data transfer sizes for now */ id->mdts = 0; id->cntlid = cpu_to_le16(ctrl->cntlid); id->ver = cpu_to_le32(ctrl->subsys->ver); id->lpa = (1 << 2); /* no enforcement soft-limit for maxcmd - pick arbitrary high value */ id->maxcmd = cpu_to_le16(NVMET_MAX_CMD); id->sgls = cpu_to_le32(1 << 0); /* we always support SGLs */ if (ctrl->ops->flags & NVMF_KEYED_SGLS) id->sgls |= cpu_to_le32(1 << 2); if (req->port->inline_data_size) id->sgls |= cpu_to_le32(1 << 20); id->oaes = cpu_to_le32(NVMET_DISC_AEN_CFG_OPTIONAL); strscpy(id->subnqn, ctrl->subsys->subsysnqn, sizeof(id->subnqn)); status = nvmet_copy_to_sgl(req, 0, id, sizeof(*id)); kfree(id); out: nvmet_req_complete(req, status); } static void nvmet_execute_disc_set_features(struct nvmet_req *req) { u32 cdw10 = le32_to_cpu(req->cmd->common.cdw10); u16 stat; if (!nvmet_check_transfer_len(req, 0)) return; switch (cdw10 & 0xff) { case NVME_FEAT_KATO: stat = nvmet_set_feat_kato(req); break; case NVME_FEAT_ASYNC_EVENT: stat = nvmet_set_feat_async_event(req, NVMET_DISC_AEN_CFG_OPTIONAL); break; default: req->error_loc = offsetof(struct nvme_common_command, cdw10); stat = NVME_SC_INVALID_FIELD | NVME_SC_DNR; break; } nvmet_req_complete(req, stat); } static void nvmet_execute_disc_get_features(struct nvmet_req *req) { u32 cdw10 = le32_to_cpu(req->cmd->common.cdw10); u16 stat = 0; if (!nvmet_check_transfer_len(req, 0)) return; switch (cdw10 & 0xff) { case NVME_FEAT_KATO: nvmet_get_feat_kato(req); break; case NVME_FEAT_ASYNC_EVENT: nvmet_get_feat_async_event(req); break; default: req->error_loc = offsetof(struct nvme_common_command, cdw10); stat = NVME_SC_INVALID_FIELD | NVME_SC_DNR; break; } 
nvmet_req_complete(req, stat); } u16 nvmet_parse_discovery_cmd(struct nvmet_req *req) { struct nvme_command *cmd = req->cmd; if (unlikely(!(req->sq->ctrl->csts & NVME_CSTS_RDY))) { pr_err("got cmd %d while not ready\n", cmd->common.opcode); req->error_loc = offsetof(struct nvme_common_command, opcode); return NVME_SC_INVALID_OPCODE | NVME_SC_DNR; } switch (cmd->common.opcode) { case nvme_admin_set_features: req->execute = nvmet_execute_disc_set_features; return 0; case nvme_admin_get_features: req->execute = nvmet_execute_disc_get_features; return 0; case nvme_admin_async_event: req->execute = nvmet_execute_async_event; return 0; case nvme_admin_keep_alive: req->execute = nvmet_execute_keep_alive; return 0; case nvme_admin_get_log_page: req->execute = nvmet_execute_disc_get_log_page; return 0; case nvme_admin_identify: req->execute = nvmet_execute_disc_identify; return 0; default: pr_debug("unhandled cmd %d\n", cmd->common.opcode); req->error_loc = offsetof(struct nvme_common_command, opcode); return NVME_SC_INVALID_OPCODE | NVME_SC_DNR; } } int __init nvmet_init_discovery(void) { nvmet_disc_subsys = nvmet_subsys_alloc(NVME_DISC_SUBSYS_NAME, NVME_NQN_CURR); return PTR_ERR_OR_ZERO(nvmet_disc_subsys); } void nvmet_exit_discovery(void) { nvmet_subsys_put(nvmet_disc_subsys); }
linux-master
drivers/nvme/target/discovery.c
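/*
 * Editor's note - an illustrative sketch, not part of the kernel sources.
 * nvmet_execute_disc_get_log_page() in drivers/nvme/target/discovery.c
 * (above) sizes its response as one header plus one descriptor per entry,
 * where the entry count is 1 (the discovery subsystem itself) + the
 * subsystems this host is allowed to see + the configured referrals.  The
 * two sizes below are assumptions matching the NVMe-oF discovery log
 * layout (1 KiB header, 1 KiB per entry); the kernel uses
 * sizeof(struct nvmf_disc_rsp_page_hdr) and
 * sizeof(struct nvmf_disc_rsp_page_entry) instead.
 */
#include <stdio.h>
#include <stddef.h>

#define DEMO_DISC_HDR_LEN	1024	/* assumed header size */
#define DEMO_DISC_ENTRY_LEN	1024	/* assumed per-entry size */

static size_t demo_disc_log_len(size_t allowed_subsystems, size_t referrals)
{
	size_t entries = 1 + allowed_subsystems + referrals;

	return DEMO_DISC_HDR_LEN + entries * DEMO_DISC_ENTRY_LEN;
}

int main(void)
{
	/* two exported subsystems and one referral -> 4 entries, 5120 bytes */
	printf("discovery log bytes = %zu\n", demo_disc_log_len(2, 1));
	return 0;
}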
// SPDX-License-Identifier: GPL-2.0 /* * NVMe ZNS-ZBD command implementation. * Copyright (C) 2021 Western Digital Corporation or its affiliates. */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/nvme.h> #include <linux/blkdev.h> #include "nvmet.h" /* * We set the Memory Page Size Minimum (MPSMIN) for target controller to 0 * which gets added by 12 in the nvme_enable_ctrl() which results in 2^12 = 4k * as page_shift value. When calculating the ZASL use shift by 12. */ #define NVMET_MPSMIN_SHIFT 12 static inline u8 nvmet_zasl(unsigned int zone_append_sects) { /* * Zone Append Size Limit (zasl) is expressed as a power of 2 value * with the minimum memory page size (i.e. 12) as unit. */ return ilog2(zone_append_sects >> (NVMET_MPSMIN_SHIFT - 9)); } static int validate_conv_zones_cb(struct blk_zone *z, unsigned int i, void *data) { if (z->type == BLK_ZONE_TYPE_CONVENTIONAL) return -EOPNOTSUPP; return 0; } bool nvmet_bdev_zns_enable(struct nvmet_ns *ns) { u8 zasl = nvmet_zasl(bdev_max_zone_append_sectors(ns->bdev)); struct gendisk *bd_disk = ns->bdev->bd_disk; int ret; if (ns->subsys->zasl) { if (ns->subsys->zasl > zasl) return false; } ns->subsys->zasl = zasl; /* * Generic zoned block devices may have a smaller last zone which is * not supported by ZNS. Exclude zoned drives that have such smaller * last zone. */ if (get_capacity(bd_disk) & (bdev_zone_sectors(ns->bdev) - 1)) return false; /* * ZNS does not define a conventional zone type. If the underlying * device has a bitmap set indicating the existence of conventional * zones, reject the device. Otherwise, use report zones to detect if * the device has conventional zones. */ if (ns->bdev->bd_disk->conv_zones_bitmap) return false; ret = blkdev_report_zones(ns->bdev, 0, bdev_nr_zones(ns->bdev), validate_conv_zones_cb, NULL); if (ret < 0) return false; ns->blksize_shift = blksize_bits(bdev_logical_block_size(ns->bdev)); return true; } void nvmet_execute_identify_ctrl_zns(struct nvmet_req *req) { u8 zasl = req->sq->ctrl->subsys->zasl; struct nvmet_ctrl *ctrl = req->sq->ctrl; struct nvme_id_ctrl_zns *id; u16 status; id = kzalloc(sizeof(*id), GFP_KERNEL); if (!id) { status = NVME_SC_INTERNAL; goto out; } if (ctrl->ops->get_mdts) id->zasl = min_t(u8, ctrl->ops->get_mdts(ctrl), zasl); else id->zasl = zasl; status = nvmet_copy_to_sgl(req, 0, id, sizeof(*id)); kfree(id); out: nvmet_req_complete(req, status); } void nvmet_execute_identify_ns_zns(struct nvmet_req *req) { struct nvme_id_ns_zns *id_zns = NULL; u64 zsze; u16 status; u32 mar, mor; if (le32_to_cpu(req->cmd->identify.nsid) == NVME_NSID_ALL) { req->error_loc = offsetof(struct nvme_identify, nsid); status = NVME_SC_INVALID_NS | NVME_SC_DNR; goto out; } id_zns = kzalloc(sizeof(*id_zns), GFP_KERNEL); if (!id_zns) { status = NVME_SC_INTERNAL; goto out; } status = nvmet_req_find_ns(req); if (status) goto done; if (nvmet_ns_revalidate(req->ns)) { mutex_lock(&req->ns->subsys->lock); nvmet_ns_changed(req->ns->subsys, req->ns->nsid); mutex_unlock(&req->ns->subsys->lock); } if (!bdev_is_zoned(req->ns->bdev)) { status = NVME_SC_INVALID_FIELD | NVME_SC_DNR; req->error_loc = offsetof(struct nvme_identify, nsid); goto out; } zsze = (bdev_zone_sectors(req->ns->bdev) << 9) >> req->ns->blksize_shift; id_zns->lbafe[0].zsze = cpu_to_le64(zsze); mor = bdev_max_open_zones(req->ns->bdev); if (!mor) mor = U32_MAX; else mor--; id_zns->mor = cpu_to_le32(mor); mar = bdev_max_active_zones(req->ns->bdev); if (!mar) mar = U32_MAX; else mar--; id_zns->mar = cpu_to_le32(mar); done: status = 
nvmet_copy_to_sgl(req, 0, id_zns, sizeof(*id_zns)); out: kfree(id_zns); nvmet_req_complete(req, status); } static u16 nvmet_bdev_validate_zone_mgmt_recv(struct nvmet_req *req) { sector_t sect = nvmet_lba_to_sect(req->ns, req->cmd->zmr.slba); u32 out_bufsize = (le32_to_cpu(req->cmd->zmr.numd) + 1) << 2; if (sect >= get_capacity(req->ns->bdev->bd_disk)) { req->error_loc = offsetof(struct nvme_zone_mgmt_recv_cmd, slba); return NVME_SC_LBA_RANGE | NVME_SC_DNR; } if (out_bufsize < sizeof(struct nvme_zone_report)) { req->error_loc = offsetof(struct nvme_zone_mgmt_recv_cmd, numd); return NVME_SC_INVALID_FIELD | NVME_SC_DNR; } if (req->cmd->zmr.zra != NVME_ZRA_ZONE_REPORT) { req->error_loc = offsetof(struct nvme_zone_mgmt_recv_cmd, zra); return NVME_SC_INVALID_FIELD | NVME_SC_DNR; } switch (req->cmd->zmr.pr) { case 0: case 1: break; default: req->error_loc = offsetof(struct nvme_zone_mgmt_recv_cmd, pr); return NVME_SC_INVALID_FIELD | NVME_SC_DNR; } switch (req->cmd->zmr.zrasf) { case NVME_ZRASF_ZONE_REPORT_ALL: case NVME_ZRASF_ZONE_STATE_EMPTY: case NVME_ZRASF_ZONE_STATE_IMP_OPEN: case NVME_ZRASF_ZONE_STATE_EXP_OPEN: case NVME_ZRASF_ZONE_STATE_CLOSED: case NVME_ZRASF_ZONE_STATE_FULL: case NVME_ZRASF_ZONE_STATE_READONLY: case NVME_ZRASF_ZONE_STATE_OFFLINE: break; default: req->error_loc = offsetof(struct nvme_zone_mgmt_recv_cmd, zrasf); return NVME_SC_INVALID_FIELD | NVME_SC_DNR; } return NVME_SC_SUCCESS; } struct nvmet_report_zone_data { struct nvmet_req *req; u64 out_buf_offset; u64 out_nr_zones; u64 nr_zones; u8 zrasf; }; static int nvmet_bdev_report_zone_cb(struct blk_zone *z, unsigned i, void *d) { static const unsigned int nvme_zrasf_to_blk_zcond[] = { [NVME_ZRASF_ZONE_STATE_EMPTY] = BLK_ZONE_COND_EMPTY, [NVME_ZRASF_ZONE_STATE_IMP_OPEN] = BLK_ZONE_COND_IMP_OPEN, [NVME_ZRASF_ZONE_STATE_EXP_OPEN] = BLK_ZONE_COND_EXP_OPEN, [NVME_ZRASF_ZONE_STATE_CLOSED] = BLK_ZONE_COND_CLOSED, [NVME_ZRASF_ZONE_STATE_READONLY] = BLK_ZONE_COND_READONLY, [NVME_ZRASF_ZONE_STATE_FULL] = BLK_ZONE_COND_FULL, [NVME_ZRASF_ZONE_STATE_OFFLINE] = BLK_ZONE_COND_OFFLINE, }; struct nvmet_report_zone_data *rz = d; if (rz->zrasf != NVME_ZRASF_ZONE_REPORT_ALL && z->cond != nvme_zrasf_to_blk_zcond[rz->zrasf]) return 0; if (rz->nr_zones < rz->out_nr_zones) { struct nvme_zone_descriptor zdesc = { }; u16 status; zdesc.zcap = nvmet_sect_to_lba(rz->req->ns, z->capacity); zdesc.zslba = nvmet_sect_to_lba(rz->req->ns, z->start); zdesc.wp = nvmet_sect_to_lba(rz->req->ns, z->wp); zdesc.za = z->reset ? 
1 << 2 : 0; zdesc.zs = z->cond << 4; zdesc.zt = z->type; status = nvmet_copy_to_sgl(rz->req, rz->out_buf_offset, &zdesc, sizeof(zdesc)); if (status) return -EINVAL; rz->out_buf_offset += sizeof(zdesc); } rz->nr_zones++; return 0; } static unsigned long nvmet_req_nr_zones_from_slba(struct nvmet_req *req) { unsigned int sect = nvmet_lba_to_sect(req->ns, req->cmd->zmr.slba); return bdev_nr_zones(req->ns->bdev) - bdev_zone_no(req->ns->bdev, sect); } static unsigned long get_nr_zones_from_buf(struct nvmet_req *req, u32 bufsize) { if (bufsize <= sizeof(struct nvme_zone_report)) return 0; return (bufsize - sizeof(struct nvme_zone_report)) / sizeof(struct nvme_zone_descriptor); } static void nvmet_bdev_zone_zmgmt_recv_work(struct work_struct *w) { struct nvmet_req *req = container_of(w, struct nvmet_req, z.zmgmt_work); sector_t start_sect = nvmet_lba_to_sect(req->ns, req->cmd->zmr.slba); unsigned long req_slba_nr_zones = nvmet_req_nr_zones_from_slba(req); u32 out_bufsize = (le32_to_cpu(req->cmd->zmr.numd) + 1) << 2; __le64 nr_zones; u16 status; int ret; struct nvmet_report_zone_data rz_data = { .out_nr_zones = get_nr_zones_from_buf(req, out_bufsize), /* leave the place for report zone header */ .out_buf_offset = sizeof(struct nvme_zone_report), .zrasf = req->cmd->zmr.zrasf, .nr_zones = 0, .req = req, }; status = nvmet_bdev_validate_zone_mgmt_recv(req); if (status) goto out; if (!req_slba_nr_zones) { status = NVME_SC_SUCCESS; goto out; } ret = blkdev_report_zones(req->ns->bdev, start_sect, req_slba_nr_zones, nvmet_bdev_report_zone_cb, &rz_data); if (ret < 0) { status = NVME_SC_INTERNAL; goto out; } /* * When partial bit is set nr_zones must indicate the number of zone * descriptors actually transferred. */ if (req->cmd->zmr.pr) rz_data.nr_zones = min(rz_data.nr_zones, rz_data.out_nr_zones); nr_zones = cpu_to_le64(rz_data.nr_zones); status = nvmet_copy_to_sgl(req, 0, &nr_zones, sizeof(nr_zones)); out: nvmet_req_complete(req, status); } void nvmet_bdev_execute_zone_mgmt_recv(struct nvmet_req *req) { INIT_WORK(&req->z.zmgmt_work, nvmet_bdev_zone_zmgmt_recv_work); queue_work(zbd_wq, &req->z.zmgmt_work); } static inline enum req_op zsa_req_op(u8 zsa) { switch (zsa) { case NVME_ZONE_OPEN: return REQ_OP_ZONE_OPEN; case NVME_ZONE_CLOSE: return REQ_OP_ZONE_CLOSE; case NVME_ZONE_FINISH: return REQ_OP_ZONE_FINISH; case NVME_ZONE_RESET: return REQ_OP_ZONE_RESET; default: return REQ_OP_LAST; } } static u16 blkdev_zone_mgmt_errno_to_nvme_status(int ret) { switch (ret) { case 0: return NVME_SC_SUCCESS; case -EINVAL: case -EIO: return NVME_SC_ZONE_INVALID_TRANSITION | NVME_SC_DNR; default: return NVME_SC_INTERNAL; } } struct nvmet_zone_mgmt_send_all_data { unsigned long *zbitmap; struct nvmet_req *req; }; static int zmgmt_send_scan_cb(struct blk_zone *z, unsigned i, void *d) { struct nvmet_zone_mgmt_send_all_data *data = d; switch (zsa_req_op(data->req->cmd->zms.zsa)) { case REQ_OP_ZONE_OPEN: switch (z->cond) { case BLK_ZONE_COND_CLOSED: break; default: return 0; } break; case REQ_OP_ZONE_CLOSE: switch (z->cond) { case BLK_ZONE_COND_IMP_OPEN: case BLK_ZONE_COND_EXP_OPEN: break; default: return 0; } break; case REQ_OP_ZONE_FINISH: switch (z->cond) { case BLK_ZONE_COND_IMP_OPEN: case BLK_ZONE_COND_EXP_OPEN: case BLK_ZONE_COND_CLOSED: break; default: return 0; } break; default: return -EINVAL; } set_bit(i, data->zbitmap); return 0; } static u16 nvmet_bdev_zone_mgmt_emulate_all(struct nvmet_req *req) { struct block_device *bdev = req->ns->bdev; unsigned int nr_zones = bdev_nr_zones(bdev); struct bio *bio = NULL; 
sector_t sector = 0; int ret; struct nvmet_zone_mgmt_send_all_data d = { .req = req, }; d.zbitmap = kcalloc_node(BITS_TO_LONGS(nr_zones), sizeof(*(d.zbitmap)), GFP_NOIO, bdev->bd_disk->node_id); if (!d.zbitmap) { ret = -ENOMEM; goto out; } /* Scan and build bitmap of the eligible zones */ ret = blkdev_report_zones(bdev, 0, nr_zones, zmgmt_send_scan_cb, &d); if (ret != nr_zones) { if (ret > 0) ret = -EIO; goto out; } else { /* We scanned all the zones */ ret = 0; } while (sector < bdev_nr_sectors(bdev)) { if (test_bit(disk_zone_no(bdev->bd_disk, sector), d.zbitmap)) { bio = blk_next_bio(bio, bdev, 0, zsa_req_op(req->cmd->zms.zsa) | REQ_SYNC, GFP_KERNEL); bio->bi_iter.bi_sector = sector; /* This may take a while, so be nice to others */ cond_resched(); } sector += bdev_zone_sectors(bdev); } if (bio) { ret = submit_bio_wait(bio); bio_put(bio); } out: kfree(d.zbitmap); return blkdev_zone_mgmt_errno_to_nvme_status(ret); } static u16 nvmet_bdev_execute_zmgmt_send_all(struct nvmet_req *req) { int ret; switch (zsa_req_op(req->cmd->zms.zsa)) { case REQ_OP_ZONE_RESET: ret = blkdev_zone_mgmt(req->ns->bdev, REQ_OP_ZONE_RESET, 0, get_capacity(req->ns->bdev->bd_disk), GFP_KERNEL); if (ret < 0) return blkdev_zone_mgmt_errno_to_nvme_status(ret); break; case REQ_OP_ZONE_OPEN: case REQ_OP_ZONE_CLOSE: case REQ_OP_ZONE_FINISH: return nvmet_bdev_zone_mgmt_emulate_all(req); default: /* this is needed to quiet compiler warning */ req->error_loc = offsetof(struct nvme_zone_mgmt_send_cmd, zsa); return NVME_SC_INVALID_FIELD | NVME_SC_DNR; } return NVME_SC_SUCCESS; } static void nvmet_bdev_zmgmt_send_work(struct work_struct *w) { struct nvmet_req *req = container_of(w, struct nvmet_req, z.zmgmt_work); sector_t sect = nvmet_lba_to_sect(req->ns, req->cmd->zms.slba); enum req_op op = zsa_req_op(req->cmd->zms.zsa); struct block_device *bdev = req->ns->bdev; sector_t zone_sectors = bdev_zone_sectors(bdev); u16 status = NVME_SC_SUCCESS; int ret; if (op == REQ_OP_LAST) { req->error_loc = offsetof(struct nvme_zone_mgmt_send_cmd, zsa); status = NVME_SC_ZONE_INVALID_TRANSITION | NVME_SC_DNR; goto out; } /* when select all bit is set slba field is ignored */ if (req->cmd->zms.select_all) { status = nvmet_bdev_execute_zmgmt_send_all(req); goto out; } if (sect >= get_capacity(bdev->bd_disk)) { req->error_loc = offsetof(struct nvme_zone_mgmt_send_cmd, slba); status = NVME_SC_LBA_RANGE | NVME_SC_DNR; goto out; } if (sect & (zone_sectors - 1)) { req->error_loc = offsetof(struct nvme_zone_mgmt_send_cmd, slba); status = NVME_SC_INVALID_FIELD | NVME_SC_DNR; goto out; } ret = blkdev_zone_mgmt(bdev, op, sect, zone_sectors, GFP_KERNEL); if (ret < 0) status = blkdev_zone_mgmt_errno_to_nvme_status(ret); out: nvmet_req_complete(req, status); } void nvmet_bdev_execute_zone_mgmt_send(struct nvmet_req *req) { INIT_WORK(&req->z.zmgmt_work, nvmet_bdev_zmgmt_send_work); queue_work(zbd_wq, &req->z.zmgmt_work); } static void nvmet_bdev_zone_append_bio_done(struct bio *bio) { struct nvmet_req *req = bio->bi_private; if (bio->bi_status == BLK_STS_OK) { req->cqe->result.u64 = nvmet_sect_to_lba(req->ns, bio->bi_iter.bi_sector); } nvmet_req_complete(req, blk_to_nvme_status(req, bio->bi_status)); nvmet_req_bio_put(req, bio); } void nvmet_bdev_execute_zone_append(struct nvmet_req *req) { sector_t sect = nvmet_lba_to_sect(req->ns, req->cmd->rw.slba); const blk_opf_t opf = REQ_OP_ZONE_APPEND | REQ_SYNC | REQ_IDLE; u16 status = NVME_SC_SUCCESS; unsigned int total_len = 0; struct scatterlist *sg; struct bio *bio; int sg_cnt; /* Request is completed on len 
mismatch in nvmet_check_transter_len() */ if (!nvmet_check_transfer_len(req, nvmet_rw_data_len(req))) return; if (!req->sg_cnt) { nvmet_req_complete(req, 0); return; } if (sect >= get_capacity(req->ns->bdev->bd_disk)) { req->error_loc = offsetof(struct nvme_rw_command, slba); status = NVME_SC_LBA_RANGE | NVME_SC_DNR; goto out; } if (sect & (bdev_zone_sectors(req->ns->bdev) - 1)) { req->error_loc = offsetof(struct nvme_rw_command, slba); status = NVME_SC_INVALID_FIELD | NVME_SC_DNR; goto out; } if (nvmet_use_inline_bvec(req)) { bio = &req->z.inline_bio; bio_init(bio, req->ns->bdev, req->inline_bvec, ARRAY_SIZE(req->inline_bvec), opf); } else { bio = bio_alloc(req->ns->bdev, req->sg_cnt, opf, GFP_KERNEL); } bio->bi_end_io = nvmet_bdev_zone_append_bio_done; bio->bi_iter.bi_sector = sect; bio->bi_private = req; if (req->cmd->rw.control & cpu_to_le16(NVME_RW_FUA)) bio->bi_opf |= REQ_FUA; for_each_sg(req->sg, sg, req->sg_cnt, sg_cnt) { struct page *p = sg_page(sg); unsigned int l = sg->length; unsigned int o = sg->offset; unsigned int ret; ret = bio_add_zone_append_page(bio, p, l, o); if (ret != sg->length) { status = NVME_SC_INTERNAL; goto out_put_bio; } total_len += sg->length; } if (total_len != nvmet_rw_data_len(req)) { status = NVME_SC_INTERNAL | NVME_SC_DNR; goto out_put_bio; } submit_bio(bio); return; out_put_bio: nvmet_req_bio_put(req, bio); out: nvmet_req_complete(req, status); } u16 nvmet_bdev_zns_parse_io_cmd(struct nvmet_req *req) { struct nvme_command *cmd = req->cmd; switch (cmd->common.opcode) { case nvme_cmd_zone_append: req->execute = nvmet_bdev_execute_zone_append; return 0; case nvme_cmd_zone_mgmt_recv: req->execute = nvmet_bdev_execute_zone_mgmt_recv; return 0; case nvme_cmd_zone_mgmt_send: req->execute = nvmet_bdev_execute_zone_mgmt_send; return 0; default: return nvmet_bdev_parse_io_cmd(req); } }
linux-master
drivers/nvme/target/zns.c
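/*
 * Editor's note - an illustrative sketch, not part of the kernel sources.
 * nvmet_zasl() in drivers/nvme/target/zns.c (above) encodes the Zone
 * Append Size Limit as a power of two in units of the minimum memory page
 * size (4 KiB, i.e. shift 12), while the block layer reports the limit in
 * 512-byte sectors - hence the ">> (12 - 9)" in the kernel.  The demo
 * below round-trips a 512 KiB limit through that encoding using an
 * open-coded ilog2().
 */
#include <stdio.h>

static unsigned int demo_zasl(unsigned int zone_append_sects)
{
	unsigned int v = zone_append_sects >> (12 - 9);	/* sectors -> 4 KiB units */
	unsigned int log = 0;

	while (v > 1) {		/* ilog2() */
		v >>= 1;
		log++;
	}
	return log;
}

int main(void)
{
	unsigned int sects = 1024;		/* 512 KiB max zone append */
	unsigned int zasl = demo_zasl(sects);

	/* decoding on the host side: bytes = 2^ZASL * 4 KiB */
	printf("zasl = %u -> %u bytes\n", zasl, (1u << zasl) * 4096);
	return 0;
}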
// SPDX-License-Identifier: GPL-2.0 /* * Common code for the NVMe target. * Copyright (c) 2015-2016 HGST, a Western Digital Company. */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/module.h> #include <linux/random.h> #include <linux/rculist.h> #include <linux/pci-p2pdma.h> #include <linux/scatterlist.h> #include <generated/utsrelease.h> #define CREATE_TRACE_POINTS #include "trace.h" #include "nvmet.h" struct kmem_cache *nvmet_bvec_cache; struct workqueue_struct *buffered_io_wq; struct workqueue_struct *zbd_wq; static const struct nvmet_fabrics_ops *nvmet_transports[NVMF_TRTYPE_MAX]; static DEFINE_IDA(cntlid_ida); struct workqueue_struct *nvmet_wq; EXPORT_SYMBOL_GPL(nvmet_wq); /* * This read/write semaphore is used to synchronize access to configuration * information on a target system that will result in discovery log page * information change for at least one host. * The full list of resources to protected by this semaphore is: * * - subsystems list * - per-subsystem allowed hosts list * - allow_any_host subsystem attribute * - nvmet_genctr * - the nvmet_transports array * * When updating any of those lists/structures write lock should be obtained, * while when reading (popolating discovery log page or checking host-subsystem * link) read lock is obtained to allow concurrent reads. */ DECLARE_RWSEM(nvmet_config_sem); u32 nvmet_ana_group_enabled[NVMET_MAX_ANAGRPS + 1]; u64 nvmet_ana_chgcnt; DECLARE_RWSEM(nvmet_ana_sem); inline u16 errno_to_nvme_status(struct nvmet_req *req, int errno) { switch (errno) { case 0: return NVME_SC_SUCCESS; case -ENOSPC: req->error_loc = offsetof(struct nvme_rw_command, length); return NVME_SC_CAP_EXCEEDED | NVME_SC_DNR; case -EREMOTEIO: req->error_loc = offsetof(struct nvme_rw_command, slba); return NVME_SC_LBA_RANGE | NVME_SC_DNR; case -EOPNOTSUPP: req->error_loc = offsetof(struct nvme_common_command, opcode); switch (req->cmd->common.opcode) { case nvme_cmd_dsm: case nvme_cmd_write_zeroes: return NVME_SC_ONCS_NOT_SUPPORTED | NVME_SC_DNR; default: return NVME_SC_INVALID_OPCODE | NVME_SC_DNR; } break; case -ENODATA: req->error_loc = offsetof(struct nvme_rw_command, nsid); return NVME_SC_ACCESS_DENIED; case -EIO: fallthrough; default: req->error_loc = offsetof(struct nvme_common_command, opcode); return NVME_SC_INTERNAL | NVME_SC_DNR; } } u16 nvmet_report_invalid_opcode(struct nvmet_req *req) { pr_debug("unhandled cmd %d on qid %d\n", req->cmd->common.opcode, req->sq->qid); req->error_loc = offsetof(struct nvme_common_command, opcode); return NVME_SC_INVALID_OPCODE | NVME_SC_DNR; } static struct nvmet_subsys *nvmet_find_get_subsys(struct nvmet_port *port, const char *subsysnqn); u16 nvmet_copy_to_sgl(struct nvmet_req *req, off_t off, const void *buf, size_t len) { if (sg_pcopy_from_buffer(req->sg, req->sg_cnt, buf, len, off) != len) { req->error_loc = offsetof(struct nvme_common_command, dptr); return NVME_SC_SGL_INVALID_DATA | NVME_SC_DNR; } return 0; } u16 nvmet_copy_from_sgl(struct nvmet_req *req, off_t off, void *buf, size_t len) { if (sg_pcopy_to_buffer(req->sg, req->sg_cnt, buf, len, off) != len) { req->error_loc = offsetof(struct nvme_common_command, dptr); return NVME_SC_SGL_INVALID_DATA | NVME_SC_DNR; } return 0; } u16 nvmet_zero_sgl(struct nvmet_req *req, off_t off, size_t len) { if (sg_zero_buffer(req->sg, req->sg_cnt, len, off) != len) { req->error_loc = offsetof(struct nvme_common_command, dptr); return NVME_SC_SGL_INVALID_DATA | NVME_SC_DNR; } return 0; } static u32 nvmet_max_nsid(struct nvmet_subsys *subsys) { struct nvmet_ns *cur; 
unsigned long idx; u32 nsid = 0; xa_for_each(&subsys->namespaces, idx, cur) nsid = cur->nsid; return nsid; } static u32 nvmet_async_event_result(struct nvmet_async_event *aen) { return aen->event_type | (aen->event_info << 8) | (aen->log_page << 16); } static void nvmet_async_events_failall(struct nvmet_ctrl *ctrl) { struct nvmet_req *req; mutex_lock(&ctrl->lock); while (ctrl->nr_async_event_cmds) { req = ctrl->async_event_cmds[--ctrl->nr_async_event_cmds]; mutex_unlock(&ctrl->lock); nvmet_req_complete(req, NVME_SC_INTERNAL | NVME_SC_DNR); mutex_lock(&ctrl->lock); } mutex_unlock(&ctrl->lock); } static void nvmet_async_events_process(struct nvmet_ctrl *ctrl) { struct nvmet_async_event *aen; struct nvmet_req *req; mutex_lock(&ctrl->lock); while (ctrl->nr_async_event_cmds && !list_empty(&ctrl->async_events)) { aen = list_first_entry(&ctrl->async_events, struct nvmet_async_event, entry); req = ctrl->async_event_cmds[--ctrl->nr_async_event_cmds]; nvmet_set_result(req, nvmet_async_event_result(aen)); list_del(&aen->entry); kfree(aen); mutex_unlock(&ctrl->lock); trace_nvmet_async_event(ctrl, req->cqe->result.u32); nvmet_req_complete(req, 0); mutex_lock(&ctrl->lock); } mutex_unlock(&ctrl->lock); } static void nvmet_async_events_free(struct nvmet_ctrl *ctrl) { struct nvmet_async_event *aen, *tmp; mutex_lock(&ctrl->lock); list_for_each_entry_safe(aen, tmp, &ctrl->async_events, entry) { list_del(&aen->entry); kfree(aen); } mutex_unlock(&ctrl->lock); } static void nvmet_async_event_work(struct work_struct *work) { struct nvmet_ctrl *ctrl = container_of(work, struct nvmet_ctrl, async_event_work); nvmet_async_events_process(ctrl); } void nvmet_add_async_event(struct nvmet_ctrl *ctrl, u8 event_type, u8 event_info, u8 log_page) { struct nvmet_async_event *aen; aen = kmalloc(sizeof(*aen), GFP_KERNEL); if (!aen) return; aen->event_type = event_type; aen->event_info = event_info; aen->log_page = log_page; mutex_lock(&ctrl->lock); list_add_tail(&aen->entry, &ctrl->async_events); mutex_unlock(&ctrl->lock); queue_work(nvmet_wq, &ctrl->async_event_work); } static void nvmet_add_to_changed_ns_log(struct nvmet_ctrl *ctrl, __le32 nsid) { u32 i; mutex_lock(&ctrl->lock); if (ctrl->nr_changed_ns > NVME_MAX_CHANGED_NAMESPACES) goto out_unlock; for (i = 0; i < ctrl->nr_changed_ns; i++) { if (ctrl->changed_ns_list[i] == nsid) goto out_unlock; } if (ctrl->nr_changed_ns == NVME_MAX_CHANGED_NAMESPACES) { ctrl->changed_ns_list[0] = cpu_to_le32(0xffffffff); ctrl->nr_changed_ns = U32_MAX; goto out_unlock; } ctrl->changed_ns_list[ctrl->nr_changed_ns++] = nsid; out_unlock: mutex_unlock(&ctrl->lock); } void nvmet_ns_changed(struct nvmet_subsys *subsys, u32 nsid) { struct nvmet_ctrl *ctrl; lockdep_assert_held(&subsys->lock); list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry) { nvmet_add_to_changed_ns_log(ctrl, cpu_to_le32(nsid)); if (nvmet_aen_bit_disabled(ctrl, NVME_AEN_BIT_NS_ATTR)) continue; nvmet_add_async_event(ctrl, NVME_AER_TYPE_NOTICE, NVME_AER_NOTICE_NS_CHANGED, NVME_LOG_CHANGED_NS); } } void nvmet_send_ana_event(struct nvmet_subsys *subsys, struct nvmet_port *port) { struct nvmet_ctrl *ctrl; mutex_lock(&subsys->lock); list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry) { if (port && ctrl->port != port) continue; if (nvmet_aen_bit_disabled(ctrl, NVME_AEN_BIT_ANA_CHANGE)) continue; nvmet_add_async_event(ctrl, NVME_AER_TYPE_NOTICE, NVME_AER_NOTICE_ANA, NVME_LOG_ANA); } mutex_unlock(&subsys->lock); } void nvmet_port_send_ana_event(struct nvmet_port *port) { struct nvmet_subsys_link *p; down_read(&nvmet_config_sem); 
list_for_each_entry(p, &port->subsystems, entry) nvmet_send_ana_event(p->subsys, port); up_read(&nvmet_config_sem); } int nvmet_register_transport(const struct nvmet_fabrics_ops *ops) { int ret = 0; down_write(&nvmet_config_sem); if (nvmet_transports[ops->type]) ret = -EINVAL; else nvmet_transports[ops->type] = ops; up_write(&nvmet_config_sem); return ret; } EXPORT_SYMBOL_GPL(nvmet_register_transport); void nvmet_unregister_transport(const struct nvmet_fabrics_ops *ops) { down_write(&nvmet_config_sem); nvmet_transports[ops->type] = NULL; up_write(&nvmet_config_sem); } EXPORT_SYMBOL_GPL(nvmet_unregister_transport); void nvmet_port_del_ctrls(struct nvmet_port *port, struct nvmet_subsys *subsys) { struct nvmet_ctrl *ctrl; mutex_lock(&subsys->lock); list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry) { if (ctrl->port == port) ctrl->ops->delete_ctrl(ctrl); } mutex_unlock(&subsys->lock); } int nvmet_enable_port(struct nvmet_port *port) { const struct nvmet_fabrics_ops *ops; int ret; lockdep_assert_held(&nvmet_config_sem); ops = nvmet_transports[port->disc_addr.trtype]; if (!ops) { up_write(&nvmet_config_sem); request_module("nvmet-transport-%d", port->disc_addr.trtype); down_write(&nvmet_config_sem); ops = nvmet_transports[port->disc_addr.trtype]; if (!ops) { pr_err("transport type %d not supported\n", port->disc_addr.trtype); return -EINVAL; } } if (!try_module_get(ops->owner)) return -EINVAL; /* * If the user requested PI support and the transport isn't pi capable, * don't enable the port. */ if (port->pi_enable && !(ops->flags & NVMF_METADATA_SUPPORTED)) { pr_err("T10-PI is not supported by transport type %d\n", port->disc_addr.trtype); ret = -EINVAL; goto out_put; } ret = ops->add_port(port); if (ret) goto out_put; /* If the transport didn't set inline_data_size, then disable it. 
*/ if (port->inline_data_size < 0) port->inline_data_size = 0; port->enabled = true; port->tr_ops = ops; return 0; out_put: module_put(ops->owner); return ret; } void nvmet_disable_port(struct nvmet_port *port) { const struct nvmet_fabrics_ops *ops; lockdep_assert_held(&nvmet_config_sem); port->enabled = false; port->tr_ops = NULL; ops = nvmet_transports[port->disc_addr.trtype]; ops->remove_port(port); module_put(ops->owner); } static void nvmet_keep_alive_timer(struct work_struct *work) { struct nvmet_ctrl *ctrl = container_of(to_delayed_work(work), struct nvmet_ctrl, ka_work); bool reset_tbkas = ctrl->reset_tbkas; ctrl->reset_tbkas = false; if (reset_tbkas) { pr_debug("ctrl %d reschedule traffic based keep-alive timer\n", ctrl->cntlid); queue_delayed_work(nvmet_wq, &ctrl->ka_work, ctrl->kato * HZ); return; } pr_err("ctrl %d keep-alive timer (%d seconds) expired!\n", ctrl->cntlid, ctrl->kato); nvmet_ctrl_fatal_error(ctrl); } void nvmet_start_keep_alive_timer(struct nvmet_ctrl *ctrl) { if (unlikely(ctrl->kato == 0)) return; pr_debug("ctrl %d start keep-alive timer for %d secs\n", ctrl->cntlid, ctrl->kato); queue_delayed_work(nvmet_wq, &ctrl->ka_work, ctrl->kato * HZ); } void nvmet_stop_keep_alive_timer(struct nvmet_ctrl *ctrl) { if (unlikely(ctrl->kato == 0)) return; pr_debug("ctrl %d stop keep-alive\n", ctrl->cntlid); cancel_delayed_work_sync(&ctrl->ka_work); } u16 nvmet_req_find_ns(struct nvmet_req *req) { u32 nsid = le32_to_cpu(req->cmd->common.nsid); req->ns = xa_load(&nvmet_req_subsys(req)->namespaces, nsid); if (unlikely(!req->ns)) { req->error_loc = offsetof(struct nvme_common_command, nsid); return NVME_SC_INVALID_NS | NVME_SC_DNR; } percpu_ref_get(&req->ns->ref); return NVME_SC_SUCCESS; } static void nvmet_destroy_namespace(struct percpu_ref *ref) { struct nvmet_ns *ns = container_of(ref, struct nvmet_ns, ref); complete(&ns->disable_done); } void nvmet_put_namespace(struct nvmet_ns *ns) { percpu_ref_put(&ns->ref); } static void nvmet_ns_dev_disable(struct nvmet_ns *ns) { nvmet_bdev_ns_disable(ns); nvmet_file_ns_disable(ns); } static int nvmet_p2pmem_ns_enable(struct nvmet_ns *ns) { int ret; struct pci_dev *p2p_dev; if (!ns->use_p2pmem) return 0; if (!ns->bdev) { pr_err("peer-to-peer DMA is not supported by non-block device namespaces\n"); return -EINVAL; } if (!blk_queue_pci_p2pdma(ns->bdev->bd_disk->queue)) { pr_err("peer-to-peer DMA is not supported by the driver of %s\n", ns->device_path); return -EINVAL; } if (ns->p2p_dev) { ret = pci_p2pdma_distance(ns->p2p_dev, nvmet_ns_dev(ns), true); if (ret < 0) return -EINVAL; } else { /* * Right now we just check that there is p2pmem available so * we can report an error to the user right away if there * is not. We'll find the actual device to use once we * setup the controller when the port's device is available. 
*/ p2p_dev = pci_p2pmem_find(nvmet_ns_dev(ns)); if (!p2p_dev) { pr_err("no peer-to-peer memory is available for %s\n", ns->device_path); return -EINVAL; } pci_dev_put(p2p_dev); } return 0; } /* * Note: ctrl->subsys->lock should be held when calling this function */ static void nvmet_p2pmem_ns_add_p2p(struct nvmet_ctrl *ctrl, struct nvmet_ns *ns) { struct device *clients[2]; struct pci_dev *p2p_dev; int ret; if (!ctrl->p2p_client || !ns->use_p2pmem) return; if (ns->p2p_dev) { ret = pci_p2pdma_distance(ns->p2p_dev, ctrl->p2p_client, true); if (ret < 0) return; p2p_dev = pci_dev_get(ns->p2p_dev); } else { clients[0] = ctrl->p2p_client; clients[1] = nvmet_ns_dev(ns); p2p_dev = pci_p2pmem_find_many(clients, ARRAY_SIZE(clients)); if (!p2p_dev) { pr_err("no peer-to-peer memory is available that's supported by %s and %s\n", dev_name(ctrl->p2p_client), ns->device_path); return; } } ret = radix_tree_insert(&ctrl->p2p_ns_map, ns->nsid, p2p_dev); if (ret < 0) pci_dev_put(p2p_dev); pr_info("using p2pmem on %s for nsid %d\n", pci_name(p2p_dev), ns->nsid); } bool nvmet_ns_revalidate(struct nvmet_ns *ns) { loff_t oldsize = ns->size; if (ns->bdev) nvmet_bdev_ns_revalidate(ns); else nvmet_file_ns_revalidate(ns); return oldsize != ns->size; } int nvmet_ns_enable(struct nvmet_ns *ns) { struct nvmet_subsys *subsys = ns->subsys; struct nvmet_ctrl *ctrl; int ret; mutex_lock(&subsys->lock); ret = 0; if (nvmet_is_passthru_subsys(subsys)) { pr_info("cannot enable both passthru and regular namespaces for a single subsystem"); goto out_unlock; } if (ns->enabled) goto out_unlock; ret = -EMFILE; if (subsys->nr_namespaces == NVMET_MAX_NAMESPACES) goto out_unlock; ret = nvmet_bdev_ns_enable(ns); if (ret == -ENOTBLK) ret = nvmet_file_ns_enable(ns); if (ret) goto out_unlock; ret = nvmet_p2pmem_ns_enable(ns); if (ret) goto out_dev_disable; list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry) nvmet_p2pmem_ns_add_p2p(ctrl, ns); ret = percpu_ref_init(&ns->ref, nvmet_destroy_namespace, 0, GFP_KERNEL); if (ret) goto out_dev_put; if (ns->nsid > subsys->max_nsid) subsys->max_nsid = ns->nsid; ret = xa_insert(&subsys->namespaces, ns->nsid, ns, GFP_KERNEL); if (ret) goto out_restore_subsys_maxnsid; subsys->nr_namespaces++; nvmet_ns_changed(subsys, ns->nsid); ns->enabled = true; ret = 0; out_unlock: mutex_unlock(&subsys->lock); return ret; out_restore_subsys_maxnsid: subsys->max_nsid = nvmet_max_nsid(subsys); percpu_ref_exit(&ns->ref); out_dev_put: list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry) pci_dev_put(radix_tree_delete(&ctrl->p2p_ns_map, ns->nsid)); out_dev_disable: nvmet_ns_dev_disable(ns); goto out_unlock; } void nvmet_ns_disable(struct nvmet_ns *ns) { struct nvmet_subsys *subsys = ns->subsys; struct nvmet_ctrl *ctrl; mutex_lock(&subsys->lock); if (!ns->enabled) goto out_unlock; ns->enabled = false; xa_erase(&ns->subsys->namespaces, ns->nsid); if (ns->nsid == subsys->max_nsid) subsys->max_nsid = nvmet_max_nsid(subsys); list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry) pci_dev_put(radix_tree_delete(&ctrl->p2p_ns_map, ns->nsid)); mutex_unlock(&subsys->lock); /* * Now that we removed the namespaces from the lookup list, we * can kill the per_cpu ref and wait for any remaining references * to be dropped, as well as a RCU grace period for anyone only * using the namepace under rcu_read_lock(). Note that we can't * use call_rcu here as we need to ensure the namespaces have * been fully destroyed before unloading the module. 
*/ percpu_ref_kill(&ns->ref); synchronize_rcu(); wait_for_completion(&ns->disable_done); percpu_ref_exit(&ns->ref); mutex_lock(&subsys->lock); subsys->nr_namespaces--; nvmet_ns_changed(subsys, ns->nsid); nvmet_ns_dev_disable(ns); out_unlock: mutex_unlock(&subsys->lock); } void nvmet_ns_free(struct nvmet_ns *ns) { nvmet_ns_disable(ns); down_write(&nvmet_ana_sem); nvmet_ana_group_enabled[ns->anagrpid]--; up_write(&nvmet_ana_sem); kfree(ns->device_path); kfree(ns); } struct nvmet_ns *nvmet_ns_alloc(struct nvmet_subsys *subsys, u32 nsid) { struct nvmet_ns *ns; ns = kzalloc(sizeof(*ns), GFP_KERNEL); if (!ns) return NULL; init_completion(&ns->disable_done); ns->nsid = nsid; ns->subsys = subsys; down_write(&nvmet_ana_sem); ns->anagrpid = NVMET_DEFAULT_ANA_GRPID; nvmet_ana_group_enabled[ns->anagrpid]++; up_write(&nvmet_ana_sem); uuid_gen(&ns->uuid); ns->buffered_io = false; ns->csi = NVME_CSI_NVM; return ns; } static void nvmet_update_sq_head(struct nvmet_req *req) { if (req->sq->size) { u32 old_sqhd, new_sqhd; old_sqhd = READ_ONCE(req->sq->sqhd); do { new_sqhd = (old_sqhd + 1) % req->sq->size; } while (!try_cmpxchg(&req->sq->sqhd, &old_sqhd, new_sqhd)); } req->cqe->sq_head = cpu_to_le16(req->sq->sqhd & 0x0000FFFF); } static void nvmet_set_error(struct nvmet_req *req, u16 status) { struct nvmet_ctrl *ctrl = req->sq->ctrl; struct nvme_error_slot *new_error_slot; unsigned long flags; req->cqe->status = cpu_to_le16(status << 1); if (!ctrl || req->error_loc == NVMET_NO_ERROR_LOC) return; spin_lock_irqsave(&ctrl->error_lock, flags); ctrl->err_counter++; new_error_slot = &ctrl->slots[ctrl->err_counter % NVMET_ERROR_LOG_SLOTS]; new_error_slot->error_count = cpu_to_le64(ctrl->err_counter); new_error_slot->sqid = cpu_to_le16(req->sq->qid); new_error_slot->cmdid = cpu_to_le16(req->cmd->common.command_id); new_error_slot->status_field = cpu_to_le16(status << 1); new_error_slot->param_error_location = cpu_to_le16(req->error_loc); new_error_slot->lba = cpu_to_le64(req->error_slba); new_error_slot->nsid = req->cmd->common.nsid; spin_unlock_irqrestore(&ctrl->error_lock, flags); /* set the more bit for this request */ req->cqe->status |= cpu_to_le16(1 << 14); } static void __nvmet_req_complete(struct nvmet_req *req, u16 status) { struct nvmet_ns *ns = req->ns; if (!req->sq->sqhd_disabled) nvmet_update_sq_head(req); req->cqe->sq_id = cpu_to_le16(req->sq->qid); req->cqe->command_id = req->cmd->common.command_id; if (unlikely(status)) nvmet_set_error(req, status); trace_nvmet_req_complete(req); req->ops->queue_response(req); if (ns) nvmet_put_namespace(ns); } void nvmet_req_complete(struct nvmet_req *req, u16 status) { struct nvmet_sq *sq = req->sq; __nvmet_req_complete(req, status); percpu_ref_put(&sq->ref); } EXPORT_SYMBOL_GPL(nvmet_req_complete); void nvmet_cq_setup(struct nvmet_ctrl *ctrl, struct nvmet_cq *cq, u16 qid, u16 size) { cq->qid = qid; cq->size = size; } void nvmet_sq_setup(struct nvmet_ctrl *ctrl, struct nvmet_sq *sq, u16 qid, u16 size) { sq->sqhd = 0; sq->qid = qid; sq->size = size; ctrl->sqs[qid] = sq; } static void nvmet_confirm_sq(struct percpu_ref *ref) { struct nvmet_sq *sq = container_of(ref, struct nvmet_sq, ref); complete(&sq->confirm_done); } void nvmet_sq_destroy(struct nvmet_sq *sq) { struct nvmet_ctrl *ctrl = sq->ctrl; /* * If this is the admin queue, complete all AERs so that our * queue doesn't have outstanding requests on it. 
*/ if (ctrl && ctrl->sqs && ctrl->sqs[0] == sq) nvmet_async_events_failall(ctrl); percpu_ref_kill_and_confirm(&sq->ref, nvmet_confirm_sq); wait_for_completion(&sq->confirm_done); wait_for_completion(&sq->free_done); percpu_ref_exit(&sq->ref); nvmet_auth_sq_free(sq); if (ctrl) { /* * The teardown flow may take some time, and the host may not * send us keep-alive during this period, hence reset the * traffic based keep-alive timer so we don't trigger a * controller teardown as a result of a keep-alive expiration. */ ctrl->reset_tbkas = true; sq->ctrl->sqs[sq->qid] = NULL; nvmet_ctrl_put(ctrl); sq->ctrl = NULL; /* allows reusing the queue later */ } } EXPORT_SYMBOL_GPL(nvmet_sq_destroy); static void nvmet_sq_free(struct percpu_ref *ref) { struct nvmet_sq *sq = container_of(ref, struct nvmet_sq, ref); complete(&sq->free_done); } int nvmet_sq_init(struct nvmet_sq *sq) { int ret; ret = percpu_ref_init(&sq->ref, nvmet_sq_free, 0, GFP_KERNEL); if (ret) { pr_err("percpu_ref init failed!\n"); return ret; } init_completion(&sq->free_done); init_completion(&sq->confirm_done); nvmet_auth_sq_init(sq); return 0; } EXPORT_SYMBOL_GPL(nvmet_sq_init); static inline u16 nvmet_check_ana_state(struct nvmet_port *port, struct nvmet_ns *ns) { enum nvme_ana_state state = port->ana_state[ns->anagrpid]; if (unlikely(state == NVME_ANA_INACCESSIBLE)) return NVME_SC_ANA_INACCESSIBLE; if (unlikely(state == NVME_ANA_PERSISTENT_LOSS)) return NVME_SC_ANA_PERSISTENT_LOSS; if (unlikely(state == NVME_ANA_CHANGE)) return NVME_SC_ANA_TRANSITION; return 0; } static inline u16 nvmet_io_cmd_check_access(struct nvmet_req *req) { if (unlikely(req->ns->readonly)) { switch (req->cmd->common.opcode) { case nvme_cmd_read: case nvme_cmd_flush: break; default: return NVME_SC_NS_WRITE_PROTECTED; } } return 0; } static u16 nvmet_parse_io_cmd(struct nvmet_req *req) { struct nvme_command *cmd = req->cmd; u16 ret; if (nvme_is_fabrics(cmd)) return nvmet_parse_fabrics_io_cmd(req); if (unlikely(!nvmet_check_auth_status(req))) return NVME_SC_AUTH_REQUIRED | NVME_SC_DNR; ret = nvmet_check_ctrl_status(req); if (unlikely(ret)) return ret; if (nvmet_is_passthru_req(req)) return nvmet_parse_passthru_io_cmd(req); ret = nvmet_req_find_ns(req); if (unlikely(ret)) return ret; ret = nvmet_check_ana_state(req->port, req->ns); if (unlikely(ret)) { req->error_loc = offsetof(struct nvme_common_command, nsid); return ret; } ret = nvmet_io_cmd_check_access(req); if (unlikely(ret)) { req->error_loc = offsetof(struct nvme_common_command, nsid); return ret; } switch (req->ns->csi) { case NVME_CSI_NVM: if (req->ns->file) return nvmet_file_parse_io_cmd(req); return nvmet_bdev_parse_io_cmd(req); case NVME_CSI_ZNS: if (IS_ENABLED(CONFIG_BLK_DEV_ZONED)) return nvmet_bdev_zns_parse_io_cmd(req); return NVME_SC_INVALID_IO_CMD_SET; default: return NVME_SC_INVALID_IO_CMD_SET; } } bool nvmet_req_init(struct nvmet_req *req, struct nvmet_cq *cq, struct nvmet_sq *sq, const struct nvmet_fabrics_ops *ops) { u8 flags = req->cmd->common.flags; u16 status; req->cq = cq; req->sq = sq; req->ops = ops; req->sg = NULL; req->metadata_sg = NULL; req->sg_cnt = 0; req->metadata_sg_cnt = 0; req->transfer_len = 0; req->metadata_len = 0; req->cqe->status = 0; req->cqe->sq_head = 0; req->ns = NULL; req->error_loc = NVMET_NO_ERROR_LOC; req->error_slba = 0; /* no support for fused commands yet */ if (unlikely(flags & (NVME_CMD_FUSE_FIRST | NVME_CMD_FUSE_SECOND))) { req->error_loc = offsetof(struct nvme_common_command, flags); status = NVME_SC_INVALID_FIELD | NVME_SC_DNR; goto fail; } /* * For 
fabrics, PSDT field shall describe metadata pointer (MPTR) that * contains an address of a single contiguous physical buffer that is * byte aligned. */ if (unlikely((flags & NVME_CMD_SGL_ALL) != NVME_CMD_SGL_METABUF)) { req->error_loc = offsetof(struct nvme_common_command, flags); status = NVME_SC_INVALID_FIELD | NVME_SC_DNR; goto fail; } if (unlikely(!req->sq->ctrl)) /* will return an error for any non-connect command: */ status = nvmet_parse_connect_cmd(req); else if (likely(req->sq->qid != 0)) status = nvmet_parse_io_cmd(req); else status = nvmet_parse_admin_cmd(req); if (status) goto fail; trace_nvmet_req_init(req, req->cmd); if (unlikely(!percpu_ref_tryget_live(&sq->ref))) { status = NVME_SC_INVALID_FIELD | NVME_SC_DNR; goto fail; } if (sq->ctrl) sq->ctrl->reset_tbkas = true; return true; fail: __nvmet_req_complete(req, status); return false; } EXPORT_SYMBOL_GPL(nvmet_req_init); void nvmet_req_uninit(struct nvmet_req *req) { percpu_ref_put(&req->sq->ref); if (req->ns) nvmet_put_namespace(req->ns); } EXPORT_SYMBOL_GPL(nvmet_req_uninit); bool nvmet_check_transfer_len(struct nvmet_req *req, size_t len) { if (unlikely(len != req->transfer_len)) { req->error_loc = offsetof(struct nvme_common_command, dptr); nvmet_req_complete(req, NVME_SC_SGL_INVALID_DATA | NVME_SC_DNR); return false; } return true; } EXPORT_SYMBOL_GPL(nvmet_check_transfer_len); bool nvmet_check_data_len_lte(struct nvmet_req *req, size_t data_len) { if (unlikely(data_len > req->transfer_len)) { req->error_loc = offsetof(struct nvme_common_command, dptr); nvmet_req_complete(req, NVME_SC_SGL_INVALID_DATA | NVME_SC_DNR); return false; } return true; } static unsigned int nvmet_data_transfer_len(struct nvmet_req *req) { return req->transfer_len - req->metadata_len; } static int nvmet_req_alloc_p2pmem_sgls(struct pci_dev *p2p_dev, struct nvmet_req *req) { req->sg = pci_p2pmem_alloc_sgl(p2p_dev, &req->sg_cnt, nvmet_data_transfer_len(req)); if (!req->sg) goto out_err; if (req->metadata_len) { req->metadata_sg = pci_p2pmem_alloc_sgl(p2p_dev, &req->metadata_sg_cnt, req->metadata_len); if (!req->metadata_sg) goto out_free_sg; } req->p2p_dev = p2p_dev; return 0; out_free_sg: pci_p2pmem_free_sgl(req->p2p_dev, req->sg); out_err: return -ENOMEM; } static struct pci_dev *nvmet_req_find_p2p_dev(struct nvmet_req *req) { if (!IS_ENABLED(CONFIG_PCI_P2PDMA) || !req->sq->ctrl || !req->sq->qid || !req->ns) return NULL; return radix_tree_lookup(&req->sq->ctrl->p2p_ns_map, req->ns->nsid); } int nvmet_req_alloc_sgls(struct nvmet_req *req) { struct pci_dev *p2p_dev = nvmet_req_find_p2p_dev(req); if (p2p_dev && !nvmet_req_alloc_p2pmem_sgls(p2p_dev, req)) return 0; req->sg = sgl_alloc(nvmet_data_transfer_len(req), GFP_KERNEL, &req->sg_cnt); if (unlikely(!req->sg)) goto out; if (req->metadata_len) { req->metadata_sg = sgl_alloc(req->metadata_len, GFP_KERNEL, &req->metadata_sg_cnt); if (unlikely(!req->metadata_sg)) goto out_free; } return 0; out_free: sgl_free(req->sg); out: return -ENOMEM; } EXPORT_SYMBOL_GPL(nvmet_req_alloc_sgls); void nvmet_req_free_sgls(struct nvmet_req *req) { if (req->p2p_dev) { pci_p2pmem_free_sgl(req->p2p_dev, req->sg); if (req->metadata_sg) pci_p2pmem_free_sgl(req->p2p_dev, req->metadata_sg); req->p2p_dev = NULL; } else { sgl_free(req->sg); if (req->metadata_sg) sgl_free(req->metadata_sg); } req->sg = NULL; req->metadata_sg = NULL; req->sg_cnt = 0; req->metadata_sg_cnt = 0; } EXPORT_SYMBOL_GPL(nvmet_req_free_sgls); static inline bool nvmet_cc_en(u32 cc) { return (cc >> NVME_CC_EN_SHIFT) & 0x1; } static inline u8 
nvmet_cc_css(u32 cc) { return (cc >> NVME_CC_CSS_SHIFT) & 0x7; } static inline u8 nvmet_cc_mps(u32 cc) { return (cc >> NVME_CC_MPS_SHIFT) & 0xf; } static inline u8 nvmet_cc_ams(u32 cc) { return (cc >> NVME_CC_AMS_SHIFT) & 0x7; } static inline u8 nvmet_cc_shn(u32 cc) { return (cc >> NVME_CC_SHN_SHIFT) & 0x3; } static inline u8 nvmet_cc_iosqes(u32 cc) { return (cc >> NVME_CC_IOSQES_SHIFT) & 0xf; } static inline u8 nvmet_cc_iocqes(u32 cc) { return (cc >> NVME_CC_IOCQES_SHIFT) & 0xf; } static inline bool nvmet_css_supported(u8 cc_css) { switch (cc_css << NVME_CC_CSS_SHIFT) { case NVME_CC_CSS_NVM: case NVME_CC_CSS_CSI: return true; default: return false; } } static void nvmet_start_ctrl(struct nvmet_ctrl *ctrl) { lockdep_assert_held(&ctrl->lock); /* * Only I/O controllers should verify iosqes,iocqes. * Strictly speaking, the spec says a discovery controller * should verify iosqes,iocqes are zeroed, however that * would break backwards compatibility, so don't enforce it. */ if (!nvmet_is_disc_subsys(ctrl->subsys) && (nvmet_cc_iosqes(ctrl->cc) != NVME_NVM_IOSQES || nvmet_cc_iocqes(ctrl->cc) != NVME_NVM_IOCQES)) { ctrl->csts = NVME_CSTS_CFS; return; } if (nvmet_cc_mps(ctrl->cc) != 0 || nvmet_cc_ams(ctrl->cc) != 0 || !nvmet_css_supported(nvmet_cc_css(ctrl->cc))) { ctrl->csts = NVME_CSTS_CFS; return; } ctrl->csts = NVME_CSTS_RDY; /* * Controllers that are not yet enabled should not really enforce the * keep alive timeout, but we still want to track a timeout and cleanup * in case a host died before it enabled the controller. Hence, simply * reset the keep alive timer when the controller is enabled. */ if (ctrl->kato) mod_delayed_work(nvmet_wq, &ctrl->ka_work, ctrl->kato * HZ); } static void nvmet_clear_ctrl(struct nvmet_ctrl *ctrl) { lockdep_assert_held(&ctrl->lock); /* XXX: tear down queues? 
*/ ctrl->csts &= ~NVME_CSTS_RDY; ctrl->cc = 0; } void nvmet_update_cc(struct nvmet_ctrl *ctrl, u32 new) { u32 old; mutex_lock(&ctrl->lock); old = ctrl->cc; ctrl->cc = new; if (nvmet_cc_en(new) && !nvmet_cc_en(old)) nvmet_start_ctrl(ctrl); if (!nvmet_cc_en(new) && nvmet_cc_en(old)) nvmet_clear_ctrl(ctrl); if (nvmet_cc_shn(new) && !nvmet_cc_shn(old)) { nvmet_clear_ctrl(ctrl); ctrl->csts |= NVME_CSTS_SHST_CMPLT; } if (!nvmet_cc_shn(new) && nvmet_cc_shn(old)) ctrl->csts &= ~NVME_CSTS_SHST_CMPLT; mutex_unlock(&ctrl->lock); } static void nvmet_init_cap(struct nvmet_ctrl *ctrl) { /* command sets supported: NVMe command set: */ ctrl->cap = (1ULL << 37); /* Controller supports one or more I/O Command Sets */ ctrl->cap |= (1ULL << 43); /* CC.EN timeout in 500msec units: */ ctrl->cap |= (15ULL << 24); /* maximum queue entries supported: */ if (ctrl->ops->get_max_queue_size) ctrl->cap |= ctrl->ops->get_max_queue_size(ctrl) - 1; else ctrl->cap |= NVMET_QUEUE_SIZE - 1; if (nvmet_is_passthru_subsys(ctrl->subsys)) nvmet_passthrough_override_cap(ctrl); } struct nvmet_ctrl *nvmet_ctrl_find_get(const char *subsysnqn, const char *hostnqn, u16 cntlid, struct nvmet_req *req) { struct nvmet_ctrl *ctrl = NULL; struct nvmet_subsys *subsys; subsys = nvmet_find_get_subsys(req->port, subsysnqn); if (!subsys) { pr_warn("connect request for invalid subsystem %s!\n", subsysnqn); req->cqe->result.u32 = IPO_IATTR_CONNECT_DATA(subsysnqn); goto out; } mutex_lock(&subsys->lock); list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry) { if (ctrl->cntlid == cntlid) { if (strncmp(hostnqn, ctrl->hostnqn, NVMF_NQN_SIZE)) { pr_warn("hostnqn mismatch.\n"); continue; } if (!kref_get_unless_zero(&ctrl->ref)) continue; /* ctrl found */ goto found; } } ctrl = NULL; /* ctrl not found */ pr_warn("could not find controller %d for subsys %s / host %s\n", cntlid, subsysnqn, hostnqn); req->cqe->result.u32 = IPO_IATTR_CONNECT_DATA(cntlid); found: mutex_unlock(&subsys->lock); nvmet_subsys_put(subsys); out: return ctrl; } u16 nvmet_check_ctrl_status(struct nvmet_req *req) { if (unlikely(!(req->sq->ctrl->cc & NVME_CC_ENABLE))) { pr_err("got cmd %d while CC.EN == 0 on qid = %d\n", req->cmd->common.opcode, req->sq->qid); return NVME_SC_CMD_SEQ_ERROR | NVME_SC_DNR; } if (unlikely(!(req->sq->ctrl->csts & NVME_CSTS_RDY))) { pr_err("got cmd %d while CSTS.RDY == 0 on qid = %d\n", req->cmd->common.opcode, req->sq->qid); return NVME_SC_CMD_SEQ_ERROR | NVME_SC_DNR; } if (unlikely(!nvmet_check_auth_status(req))) { pr_warn("qid %d not authenticated\n", req->sq->qid); return NVME_SC_AUTH_REQUIRED | NVME_SC_DNR; } return 0; } bool nvmet_host_allowed(struct nvmet_subsys *subsys, const char *hostnqn) { struct nvmet_host_link *p; lockdep_assert_held(&nvmet_config_sem); if (subsys->allow_any_host) return true; if (nvmet_is_disc_subsys(subsys)) /* allow all access to disc subsys */ return true; list_for_each_entry(p, &subsys->hosts, entry) { if (!strcmp(nvmet_host_name(p->host), hostnqn)) return true; } return false; } /* * Note: ctrl->subsys->lock should be held when calling this function */ static void nvmet_setup_p2p_ns_map(struct nvmet_ctrl *ctrl, struct nvmet_req *req) { struct nvmet_ns *ns; unsigned long idx; if (!req->p2p_client) return; ctrl->p2p_client = get_device(req->p2p_client); xa_for_each(&ctrl->subsys->namespaces, idx, ns) nvmet_p2pmem_ns_add_p2p(ctrl, ns); } /* * Note: ctrl->subsys->lock should be held when calling this function */ static void nvmet_release_p2p_ns_map(struct nvmet_ctrl *ctrl) { struct radix_tree_iter iter; void __rcu **slot; 
radix_tree_for_each_slot(slot, &ctrl->p2p_ns_map, &iter, 0) pci_dev_put(radix_tree_deref_slot(slot)); put_device(ctrl->p2p_client); } static void nvmet_fatal_error_handler(struct work_struct *work) { struct nvmet_ctrl *ctrl = container_of(work, struct nvmet_ctrl, fatal_err_work); pr_err("ctrl %d fatal error occurred!\n", ctrl->cntlid); ctrl->ops->delete_ctrl(ctrl); } u16 nvmet_alloc_ctrl(const char *subsysnqn, const char *hostnqn, struct nvmet_req *req, u32 kato, struct nvmet_ctrl **ctrlp) { struct nvmet_subsys *subsys; struct nvmet_ctrl *ctrl; int ret; u16 status; status = NVME_SC_CONNECT_INVALID_PARAM | NVME_SC_DNR; subsys = nvmet_find_get_subsys(req->port, subsysnqn); if (!subsys) { pr_warn("connect request for invalid subsystem %s!\n", subsysnqn); req->cqe->result.u32 = IPO_IATTR_CONNECT_DATA(subsysnqn); req->error_loc = offsetof(struct nvme_common_command, dptr); goto out; } down_read(&nvmet_config_sem); if (!nvmet_host_allowed(subsys, hostnqn)) { pr_info("connect by host %s for subsystem %s not allowed\n", hostnqn, subsysnqn); req->cqe->result.u32 = IPO_IATTR_CONNECT_DATA(hostnqn); up_read(&nvmet_config_sem); status = NVME_SC_CONNECT_INVALID_HOST | NVME_SC_DNR; req->error_loc = offsetof(struct nvme_common_command, dptr); goto out_put_subsystem; } up_read(&nvmet_config_sem); status = NVME_SC_INTERNAL; ctrl = kzalloc(sizeof(*ctrl), GFP_KERNEL); if (!ctrl) goto out_put_subsystem; mutex_init(&ctrl->lock); ctrl->port = req->port; ctrl->ops = req->ops; #ifdef CONFIG_NVME_TARGET_PASSTHRU /* By default, set loop targets to clear IDS by default */ if (ctrl->port->disc_addr.trtype == NVMF_TRTYPE_LOOP) subsys->clear_ids = 1; #endif INIT_WORK(&ctrl->async_event_work, nvmet_async_event_work); INIT_LIST_HEAD(&ctrl->async_events); INIT_RADIX_TREE(&ctrl->p2p_ns_map, GFP_KERNEL); INIT_WORK(&ctrl->fatal_err_work, nvmet_fatal_error_handler); INIT_DELAYED_WORK(&ctrl->ka_work, nvmet_keep_alive_timer); memcpy(ctrl->subsysnqn, subsysnqn, NVMF_NQN_SIZE); memcpy(ctrl->hostnqn, hostnqn, NVMF_NQN_SIZE); kref_init(&ctrl->ref); ctrl->subsys = subsys; nvmet_init_cap(ctrl); WRITE_ONCE(ctrl->aen_enabled, NVMET_AEN_CFG_OPTIONAL); ctrl->changed_ns_list = kmalloc_array(NVME_MAX_CHANGED_NAMESPACES, sizeof(__le32), GFP_KERNEL); if (!ctrl->changed_ns_list) goto out_free_ctrl; ctrl->sqs = kcalloc(subsys->max_qid + 1, sizeof(struct nvmet_sq *), GFP_KERNEL); if (!ctrl->sqs) goto out_free_changed_ns_list; if (subsys->cntlid_min > subsys->cntlid_max) goto out_free_sqs; ret = ida_alloc_range(&cntlid_ida, subsys->cntlid_min, subsys->cntlid_max, GFP_KERNEL); if (ret < 0) { status = NVME_SC_CONNECT_CTRL_BUSY | NVME_SC_DNR; goto out_free_sqs; } ctrl->cntlid = ret; /* * Discovery controllers may use some arbitrary high value * in order to cleanup stale discovery sessions */ if (nvmet_is_disc_subsys(ctrl->subsys) && !kato) kato = NVMET_DISC_KATO_MS; /* keep-alive timeout in seconds */ ctrl->kato = DIV_ROUND_UP(kato, 1000); ctrl->err_counter = 0; spin_lock_init(&ctrl->error_lock); nvmet_start_keep_alive_timer(ctrl); mutex_lock(&subsys->lock); list_add_tail(&ctrl->subsys_entry, &subsys->ctrls); nvmet_setup_p2p_ns_map(ctrl, req); mutex_unlock(&subsys->lock); *ctrlp = ctrl; return 0; out_free_sqs: kfree(ctrl->sqs); out_free_changed_ns_list: kfree(ctrl->changed_ns_list); out_free_ctrl: kfree(ctrl); out_put_subsystem: nvmet_subsys_put(subsys); out: return status; } static void nvmet_ctrl_free(struct kref *ref) { struct nvmet_ctrl *ctrl = container_of(ref, struct nvmet_ctrl, ref); struct nvmet_subsys *subsys = ctrl->subsys; 
mutex_lock(&subsys->lock); nvmet_release_p2p_ns_map(ctrl); list_del(&ctrl->subsys_entry); mutex_unlock(&subsys->lock); nvmet_stop_keep_alive_timer(ctrl); flush_work(&ctrl->async_event_work); cancel_work_sync(&ctrl->fatal_err_work); nvmet_destroy_auth(ctrl); ida_free(&cntlid_ida, ctrl->cntlid); nvmet_async_events_free(ctrl); kfree(ctrl->sqs); kfree(ctrl->changed_ns_list); kfree(ctrl); nvmet_subsys_put(subsys); } void nvmet_ctrl_put(struct nvmet_ctrl *ctrl) { kref_put(&ctrl->ref, nvmet_ctrl_free); } void nvmet_ctrl_fatal_error(struct nvmet_ctrl *ctrl) { mutex_lock(&ctrl->lock); if (!(ctrl->csts & NVME_CSTS_CFS)) { ctrl->csts |= NVME_CSTS_CFS; queue_work(nvmet_wq, &ctrl->fatal_err_work); } mutex_unlock(&ctrl->lock); } EXPORT_SYMBOL_GPL(nvmet_ctrl_fatal_error); static struct nvmet_subsys *nvmet_find_get_subsys(struct nvmet_port *port, const char *subsysnqn) { struct nvmet_subsys_link *p; if (!port) return NULL; if (!strcmp(NVME_DISC_SUBSYS_NAME, subsysnqn)) { if (!kref_get_unless_zero(&nvmet_disc_subsys->ref)) return NULL; return nvmet_disc_subsys; } down_read(&nvmet_config_sem); list_for_each_entry(p, &port->subsystems, entry) { if (!strncmp(p->subsys->subsysnqn, subsysnqn, NVMF_NQN_SIZE)) { if (!kref_get_unless_zero(&p->subsys->ref)) break; up_read(&nvmet_config_sem); return p->subsys; } } up_read(&nvmet_config_sem); return NULL; } struct nvmet_subsys *nvmet_subsys_alloc(const char *subsysnqn, enum nvme_subsys_type type) { struct nvmet_subsys *subsys; char serial[NVMET_SN_MAX_SIZE / 2]; int ret; subsys = kzalloc(sizeof(*subsys), GFP_KERNEL); if (!subsys) return ERR_PTR(-ENOMEM); subsys->ver = NVMET_DEFAULT_VS; /* generate a random serial number as our controllers are ephemeral: */ get_random_bytes(&serial, sizeof(serial)); bin2hex(subsys->serial, &serial, sizeof(serial)); subsys->model_number = kstrdup(NVMET_DEFAULT_CTRL_MODEL, GFP_KERNEL); if (!subsys->model_number) { ret = -ENOMEM; goto free_subsys; } subsys->ieee_oui = 0; subsys->firmware_rev = kstrndup(UTS_RELEASE, NVMET_FR_MAX_SIZE, GFP_KERNEL); if (!subsys->firmware_rev) { ret = -ENOMEM; goto free_mn; } switch (type) { case NVME_NQN_NVME: subsys->max_qid = NVMET_NR_QUEUES; break; case NVME_NQN_DISC: case NVME_NQN_CURR: subsys->max_qid = 0; break; default: pr_err("%s: Unknown Subsystem type - %d\n", __func__, type); ret = -EINVAL; goto free_fr; } subsys->type = type; subsys->subsysnqn = kstrndup(subsysnqn, NVMF_NQN_SIZE, GFP_KERNEL); if (!subsys->subsysnqn) { ret = -ENOMEM; goto free_fr; } subsys->cntlid_min = NVME_CNTLID_MIN; subsys->cntlid_max = NVME_CNTLID_MAX; kref_init(&subsys->ref); mutex_init(&subsys->lock); xa_init(&subsys->namespaces); INIT_LIST_HEAD(&subsys->ctrls); INIT_LIST_HEAD(&subsys->hosts); return subsys; free_fr: kfree(subsys->firmware_rev); free_mn: kfree(subsys->model_number); free_subsys: kfree(subsys); return ERR_PTR(ret); } static void nvmet_subsys_free(struct kref *ref) { struct nvmet_subsys *subsys = container_of(ref, struct nvmet_subsys, ref); WARN_ON_ONCE(!xa_empty(&subsys->namespaces)); xa_destroy(&subsys->namespaces); nvmet_passthru_subsys_free(subsys); kfree(subsys->subsysnqn); kfree(subsys->model_number); kfree(subsys->firmware_rev); kfree(subsys); } void nvmet_subsys_del_ctrls(struct nvmet_subsys *subsys) { struct nvmet_ctrl *ctrl; mutex_lock(&subsys->lock); list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry) ctrl->ops->delete_ctrl(ctrl); mutex_unlock(&subsys->lock); } void nvmet_subsys_put(struct nvmet_subsys *subsys) { kref_put(&subsys->ref, nvmet_subsys_free); } static int __init nvmet_init(void) { 
int error = -ENOMEM; nvmet_ana_group_enabled[NVMET_DEFAULT_ANA_GRPID] = 1; nvmet_bvec_cache = kmem_cache_create("nvmet-bvec", NVMET_MAX_MPOOL_BVEC * sizeof(struct bio_vec), 0, SLAB_HWCACHE_ALIGN, NULL); if (!nvmet_bvec_cache) return -ENOMEM; zbd_wq = alloc_workqueue("nvmet-zbd-wq", WQ_MEM_RECLAIM, 0); if (!zbd_wq) goto out_destroy_bvec_cache; buffered_io_wq = alloc_workqueue("nvmet-buffered-io-wq", WQ_MEM_RECLAIM, 0); if (!buffered_io_wq) goto out_free_zbd_work_queue; nvmet_wq = alloc_workqueue("nvmet-wq", WQ_MEM_RECLAIM, 0); if (!nvmet_wq) goto out_free_buffered_work_queue; error = nvmet_init_discovery(); if (error) goto out_free_nvmet_work_queue; error = nvmet_init_configfs(); if (error) goto out_exit_discovery; return 0; out_exit_discovery: nvmet_exit_discovery(); out_free_nvmet_work_queue: destroy_workqueue(nvmet_wq); out_free_buffered_work_queue: destroy_workqueue(buffered_io_wq); out_free_zbd_work_queue: destroy_workqueue(zbd_wq); out_destroy_bvec_cache: kmem_cache_destroy(nvmet_bvec_cache); return error; } static void __exit nvmet_exit(void) { nvmet_exit_configfs(); nvmet_exit_discovery(); ida_destroy(&cntlid_ida); destroy_workqueue(nvmet_wq); destroy_workqueue(buffered_io_wq); destroy_workqueue(zbd_wq); kmem_cache_destroy(nvmet_bvec_cache); BUILD_BUG_ON(sizeof(struct nvmf_disc_rsp_page_entry) != 1024); BUILD_BUG_ON(sizeof(struct nvmf_disc_rsp_page_hdr) != 1024); } module_init(nvmet_init); module_exit(nvmet_exit); MODULE_LICENSE("GPL v2");
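/*
 * Illustrative appendix to the listing above (not part of the upstream
 * driver): nvmet_update_sq_head() advances the submission-queue head with a
 * lock-free compare-and-swap wrapping at the queue size. The sketch below
 * redoes that pattern with portable C11 atomics so it can be compiled and
 * run in user space. All names here (demo_sq, demo_update_sq_head) are
 * hypothetical and exist only for this example.
 */
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

struct demo_sq {
	uint16_t size;		/* number of entries in the queue */
	_Atomic uint32_t sqhd;	/* current head, always < size */
};

/* Advance the head by one slot, wrapping at sq->size, without locking. */
static void demo_update_sq_head(struct demo_sq *sq)
{
	uint32_t old_sqhd, new_sqhd;

	if (!sq->size)
		return;

	old_sqhd = atomic_load(&sq->sqhd);
	do {
		new_sqhd = (old_sqhd + 1) % sq->size;
		/* on failure old_sqhd is reloaded, so new_sqhd is recomputed */
	} while (!atomic_compare_exchange_weak(&sq->sqhd, &old_sqhd, new_sqhd));
}

int main(void)
{
	struct demo_sq sq = { .size = 4, .sqhd = 0 };

	for (int i = 0; i < 6; i++) {
		demo_update_sq_head(&sq);
		printf("sq_head = %u\n", atomic_load(&sq.sqhd));
	}
	return 0;	/* prints 1 2 3 0 1 2 */
}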
linux-master
drivers/nvme/target/core.c
// SPDX-License-Identifier: GPL-2.0
/*
 * NVMe over Fabrics TCP target.
 * Copyright (c) 2018 Lightbits Labs. All rights reserved.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/nvme-tcp.h>
#include <net/sock.h>
#include <net/tcp.h>
#include <linux/inet.h>
#include <linux/llist.h>
#include <crypto/hash.h>
#include <trace/events/sock.h>

#include "nvmet.h"

#define NVMET_TCP_DEF_INLINE_DATA_SIZE	(4 * PAGE_SIZE)

static int param_store_val(const char *str, int *val, int min, int max)
{
	int ret, new_val;

	ret = kstrtoint(str, 10, &new_val);
	if (ret)
		return -EINVAL;

	if (new_val < min || new_val > max)
		return -EINVAL;

	*val = new_val;
	return 0;
}

static int set_params(const char *str, const struct kernel_param *kp)
{
	return param_store_val(str, kp->arg, 0, INT_MAX);
}

static const struct kernel_param_ops set_param_ops = {
	.set	= set_params,
	.get	= param_get_int,
};

/* Define the socket priority to use for connections where it is desirable
 * that the NIC consider performing optimized packet processing or filtering.
 * A non-zero value is sufficient to indicate general consideration of any
 * possible optimization. Making it a module param allows for alternative
 * values that may be unique for some NIC implementations.
 */
static int so_priority;
device_param_cb(so_priority, &set_param_ops, &so_priority, 0644);
MODULE_PARM_DESC(so_priority, "nvmet tcp socket optimize priority: Default 0");

/* Define a time period (in usecs) that io_work() shall sample an activated
 * queue before determining it to be idle. This optional module behavior
 * can enable NIC solutions that support socket optimized packet processing
 * using advanced interrupt moderation techniques.
*/ static int idle_poll_period_usecs; device_param_cb(idle_poll_period_usecs, &set_param_ops, &idle_poll_period_usecs, 0644); MODULE_PARM_DESC(idle_poll_period_usecs, "nvmet tcp io_work poll till idle time period in usecs: Default 0"); #define NVMET_TCP_RECV_BUDGET 8 #define NVMET_TCP_SEND_BUDGET 8 #define NVMET_TCP_IO_WORK_BUDGET 64 enum nvmet_tcp_send_state { NVMET_TCP_SEND_DATA_PDU, NVMET_TCP_SEND_DATA, NVMET_TCP_SEND_R2T, NVMET_TCP_SEND_DDGST, NVMET_TCP_SEND_RESPONSE }; enum nvmet_tcp_recv_state { NVMET_TCP_RECV_PDU, NVMET_TCP_RECV_DATA, NVMET_TCP_RECV_DDGST, NVMET_TCP_RECV_ERR, }; enum { NVMET_TCP_F_INIT_FAILED = (1 << 0), }; struct nvmet_tcp_cmd { struct nvmet_tcp_queue *queue; struct nvmet_req req; struct nvme_tcp_cmd_pdu *cmd_pdu; struct nvme_tcp_rsp_pdu *rsp_pdu; struct nvme_tcp_data_pdu *data_pdu; struct nvme_tcp_r2t_pdu *r2t_pdu; u32 rbytes_done; u32 wbytes_done; u32 pdu_len; u32 pdu_recv; int sg_idx; struct msghdr recv_msg; struct bio_vec *iov; u32 flags; struct list_head entry; struct llist_node lentry; /* send state */ u32 offset; struct scatterlist *cur_sg; enum nvmet_tcp_send_state state; __le32 exp_ddgst; __le32 recv_ddgst; }; enum nvmet_tcp_queue_state { NVMET_TCP_Q_CONNECTING, NVMET_TCP_Q_LIVE, NVMET_TCP_Q_DISCONNECTING, }; struct nvmet_tcp_queue { struct socket *sock; struct nvmet_tcp_port *port; struct work_struct io_work; struct nvmet_cq nvme_cq; struct nvmet_sq nvme_sq; /* send state */ struct nvmet_tcp_cmd *cmds; unsigned int nr_cmds; struct list_head free_list; struct llist_head resp_list; struct list_head resp_send_list; int send_list_len; struct nvmet_tcp_cmd *snd_cmd; /* recv state */ int offset; int left; enum nvmet_tcp_recv_state rcv_state; struct nvmet_tcp_cmd *cmd; union nvme_tcp_pdu pdu; /* digest state */ bool hdr_digest; bool data_digest; struct ahash_request *snd_hash; struct ahash_request *rcv_hash; unsigned long poll_end; spinlock_t state_lock; enum nvmet_tcp_queue_state state; struct sockaddr_storage sockaddr; struct sockaddr_storage sockaddr_peer; struct work_struct release_work; int idx; struct list_head queue_list; struct nvmet_tcp_cmd connect; struct page_frag_cache pf_cache; void (*data_ready)(struct sock *); void (*state_change)(struct sock *); void (*write_space)(struct sock *); }; struct nvmet_tcp_port { struct socket *sock; struct work_struct accept_work; struct nvmet_port *nport; struct sockaddr_storage addr; void (*data_ready)(struct sock *); }; static DEFINE_IDA(nvmet_tcp_queue_ida); static LIST_HEAD(nvmet_tcp_queue_list); static DEFINE_MUTEX(nvmet_tcp_queue_mutex); static struct workqueue_struct *nvmet_tcp_wq; static const struct nvmet_fabrics_ops nvmet_tcp_ops; static void nvmet_tcp_free_cmd(struct nvmet_tcp_cmd *c); static void nvmet_tcp_free_cmd_buffers(struct nvmet_tcp_cmd *cmd); static inline u16 nvmet_tcp_cmd_tag(struct nvmet_tcp_queue *queue, struct nvmet_tcp_cmd *cmd) { if (unlikely(!queue->nr_cmds)) { /* We didn't allocate cmds yet, send 0xffff */ return USHRT_MAX; } return cmd - queue->cmds; } static inline bool nvmet_tcp_has_data_in(struct nvmet_tcp_cmd *cmd) { return nvme_is_write(cmd->req.cmd) && cmd->rbytes_done < cmd->req.transfer_len; } static inline bool nvmet_tcp_need_data_in(struct nvmet_tcp_cmd *cmd) { return nvmet_tcp_has_data_in(cmd) && !cmd->req.cqe->status; } static inline bool nvmet_tcp_need_data_out(struct nvmet_tcp_cmd *cmd) { return !nvme_is_write(cmd->req.cmd) && cmd->req.transfer_len > 0 && !cmd->req.cqe->status; } static inline bool nvmet_tcp_has_inline_data(struct nvmet_tcp_cmd *cmd) { return 
nvme_is_write(cmd->req.cmd) && cmd->pdu_len && !cmd->rbytes_done; } static inline struct nvmet_tcp_cmd * nvmet_tcp_get_cmd(struct nvmet_tcp_queue *queue) { struct nvmet_tcp_cmd *cmd; cmd = list_first_entry_or_null(&queue->free_list, struct nvmet_tcp_cmd, entry); if (!cmd) return NULL; list_del_init(&cmd->entry); cmd->rbytes_done = cmd->wbytes_done = 0; cmd->pdu_len = 0; cmd->pdu_recv = 0; cmd->iov = NULL; cmd->flags = 0; return cmd; } static inline void nvmet_tcp_put_cmd(struct nvmet_tcp_cmd *cmd) { if (unlikely(cmd == &cmd->queue->connect)) return; list_add_tail(&cmd->entry, &cmd->queue->free_list); } static inline int queue_cpu(struct nvmet_tcp_queue *queue) { return queue->sock->sk->sk_incoming_cpu; } static inline u8 nvmet_tcp_hdgst_len(struct nvmet_tcp_queue *queue) { return queue->hdr_digest ? NVME_TCP_DIGEST_LENGTH : 0; } static inline u8 nvmet_tcp_ddgst_len(struct nvmet_tcp_queue *queue) { return queue->data_digest ? NVME_TCP_DIGEST_LENGTH : 0; } static inline void nvmet_tcp_hdgst(struct ahash_request *hash, void *pdu, size_t len) { struct scatterlist sg; sg_init_one(&sg, pdu, len); ahash_request_set_crypt(hash, &sg, pdu + len, len); crypto_ahash_digest(hash); } static int nvmet_tcp_verify_hdgst(struct nvmet_tcp_queue *queue, void *pdu, size_t len) { struct nvme_tcp_hdr *hdr = pdu; __le32 recv_digest; __le32 exp_digest; if (unlikely(!(hdr->flags & NVME_TCP_F_HDGST))) { pr_err("queue %d: header digest enabled but no header digest\n", queue->idx); return -EPROTO; } recv_digest = *(__le32 *)(pdu + hdr->hlen); nvmet_tcp_hdgst(queue->rcv_hash, pdu, len); exp_digest = *(__le32 *)(pdu + hdr->hlen); if (recv_digest != exp_digest) { pr_err("queue %d: header digest error: recv %#x expected %#x\n", queue->idx, le32_to_cpu(recv_digest), le32_to_cpu(exp_digest)); return -EPROTO; } return 0; } static int nvmet_tcp_check_ddgst(struct nvmet_tcp_queue *queue, void *pdu) { struct nvme_tcp_hdr *hdr = pdu; u8 digest_len = nvmet_tcp_hdgst_len(queue); u32 len; len = le32_to_cpu(hdr->plen) - hdr->hlen - (hdr->flags & NVME_TCP_F_HDGST ? 
digest_len : 0); if (unlikely(len && !(hdr->flags & NVME_TCP_F_DDGST))) { pr_err("queue %d: data digest flag is cleared\n", queue->idx); return -EPROTO; } return 0; } static void nvmet_tcp_free_cmd_buffers(struct nvmet_tcp_cmd *cmd) { kfree(cmd->iov); sgl_free(cmd->req.sg); cmd->iov = NULL; cmd->req.sg = NULL; } static void nvmet_tcp_build_pdu_iovec(struct nvmet_tcp_cmd *cmd) { struct bio_vec *iov = cmd->iov; struct scatterlist *sg; u32 length, offset, sg_offset; int nr_pages; length = cmd->pdu_len; nr_pages = DIV_ROUND_UP(length, PAGE_SIZE); offset = cmd->rbytes_done; cmd->sg_idx = offset / PAGE_SIZE; sg_offset = offset % PAGE_SIZE; sg = &cmd->req.sg[cmd->sg_idx]; while (length) { u32 iov_len = min_t(u32, length, sg->length - sg_offset); bvec_set_page(iov, sg_page(sg), iov_len, sg->offset + sg_offset); length -= iov_len; sg = sg_next(sg); iov++; sg_offset = 0; } iov_iter_bvec(&cmd->recv_msg.msg_iter, ITER_DEST, cmd->iov, nr_pages, cmd->pdu_len); } static void nvmet_tcp_fatal_error(struct nvmet_tcp_queue *queue) { queue->rcv_state = NVMET_TCP_RECV_ERR; if (queue->nvme_sq.ctrl) nvmet_ctrl_fatal_error(queue->nvme_sq.ctrl); else kernel_sock_shutdown(queue->sock, SHUT_RDWR); } static void nvmet_tcp_socket_error(struct nvmet_tcp_queue *queue, int status) { if (status == -EPIPE || status == -ECONNRESET) kernel_sock_shutdown(queue->sock, SHUT_RDWR); else nvmet_tcp_fatal_error(queue); } static int nvmet_tcp_map_data(struct nvmet_tcp_cmd *cmd) { struct nvme_sgl_desc *sgl = &cmd->req.cmd->common.dptr.sgl; u32 len = le32_to_cpu(sgl->length); if (!len) return 0; if (sgl->type == ((NVME_SGL_FMT_DATA_DESC << 4) | NVME_SGL_FMT_OFFSET)) { if (!nvme_is_write(cmd->req.cmd)) return NVME_SC_INVALID_FIELD | NVME_SC_DNR; if (len > cmd->req.port->inline_data_size) return NVME_SC_SGL_INVALID_OFFSET | NVME_SC_DNR; cmd->pdu_len = len; } cmd->req.transfer_len += len; cmd->req.sg = sgl_alloc(len, GFP_KERNEL, &cmd->req.sg_cnt); if (!cmd->req.sg) return NVME_SC_INTERNAL; cmd->cur_sg = cmd->req.sg; if (nvmet_tcp_has_data_in(cmd)) { cmd->iov = kmalloc_array(cmd->req.sg_cnt, sizeof(*cmd->iov), GFP_KERNEL); if (!cmd->iov) goto err; } return 0; err: nvmet_tcp_free_cmd_buffers(cmd); return NVME_SC_INTERNAL; } static void nvmet_tcp_calc_ddgst(struct ahash_request *hash, struct nvmet_tcp_cmd *cmd) { ahash_request_set_crypt(hash, cmd->req.sg, (void *)&cmd->exp_ddgst, cmd->req.transfer_len); crypto_ahash_digest(hash); } static void nvmet_setup_c2h_data_pdu(struct nvmet_tcp_cmd *cmd) { struct nvme_tcp_data_pdu *pdu = cmd->data_pdu; struct nvmet_tcp_queue *queue = cmd->queue; u8 hdgst = nvmet_tcp_hdgst_len(cmd->queue); u8 ddgst = nvmet_tcp_ddgst_len(cmd->queue); cmd->offset = 0; cmd->state = NVMET_TCP_SEND_DATA_PDU; pdu->hdr.type = nvme_tcp_c2h_data; pdu->hdr.flags = NVME_TCP_F_DATA_LAST | (queue->nvme_sq.sqhd_disabled ? 
NVME_TCP_F_DATA_SUCCESS : 0); pdu->hdr.hlen = sizeof(*pdu); pdu->hdr.pdo = pdu->hdr.hlen + hdgst; pdu->hdr.plen = cpu_to_le32(pdu->hdr.hlen + hdgst + cmd->req.transfer_len + ddgst); pdu->command_id = cmd->req.cqe->command_id; pdu->data_length = cpu_to_le32(cmd->req.transfer_len); pdu->data_offset = cpu_to_le32(cmd->wbytes_done); if (queue->data_digest) { pdu->hdr.flags |= NVME_TCP_F_DDGST; nvmet_tcp_calc_ddgst(queue->snd_hash, cmd); } if (cmd->queue->hdr_digest) { pdu->hdr.flags |= NVME_TCP_F_HDGST; nvmet_tcp_hdgst(queue->snd_hash, pdu, sizeof(*pdu)); } } static void nvmet_setup_r2t_pdu(struct nvmet_tcp_cmd *cmd) { struct nvme_tcp_r2t_pdu *pdu = cmd->r2t_pdu; struct nvmet_tcp_queue *queue = cmd->queue; u8 hdgst = nvmet_tcp_hdgst_len(cmd->queue); cmd->offset = 0; cmd->state = NVMET_TCP_SEND_R2T; pdu->hdr.type = nvme_tcp_r2t; pdu->hdr.flags = 0; pdu->hdr.hlen = sizeof(*pdu); pdu->hdr.pdo = 0; pdu->hdr.plen = cpu_to_le32(pdu->hdr.hlen + hdgst); pdu->command_id = cmd->req.cmd->common.command_id; pdu->ttag = nvmet_tcp_cmd_tag(cmd->queue, cmd); pdu->r2t_length = cpu_to_le32(cmd->req.transfer_len - cmd->rbytes_done); pdu->r2t_offset = cpu_to_le32(cmd->rbytes_done); if (cmd->queue->hdr_digest) { pdu->hdr.flags |= NVME_TCP_F_HDGST; nvmet_tcp_hdgst(queue->snd_hash, pdu, sizeof(*pdu)); } } static void nvmet_setup_response_pdu(struct nvmet_tcp_cmd *cmd) { struct nvme_tcp_rsp_pdu *pdu = cmd->rsp_pdu; struct nvmet_tcp_queue *queue = cmd->queue; u8 hdgst = nvmet_tcp_hdgst_len(cmd->queue); cmd->offset = 0; cmd->state = NVMET_TCP_SEND_RESPONSE; pdu->hdr.type = nvme_tcp_rsp; pdu->hdr.flags = 0; pdu->hdr.hlen = sizeof(*pdu); pdu->hdr.pdo = 0; pdu->hdr.plen = cpu_to_le32(pdu->hdr.hlen + hdgst); if (cmd->queue->hdr_digest) { pdu->hdr.flags |= NVME_TCP_F_HDGST; nvmet_tcp_hdgst(queue->snd_hash, pdu, sizeof(*pdu)); } } static void nvmet_tcp_process_resp_list(struct nvmet_tcp_queue *queue) { struct llist_node *node; struct nvmet_tcp_cmd *cmd; for (node = llist_del_all(&queue->resp_list); node; node = node->next) { cmd = llist_entry(node, struct nvmet_tcp_cmd, lentry); list_add(&cmd->entry, &queue->resp_send_list); queue->send_list_len++; } } static struct nvmet_tcp_cmd *nvmet_tcp_fetch_cmd(struct nvmet_tcp_queue *queue) { queue->snd_cmd = list_first_entry_or_null(&queue->resp_send_list, struct nvmet_tcp_cmd, entry); if (!queue->snd_cmd) { nvmet_tcp_process_resp_list(queue); queue->snd_cmd = list_first_entry_or_null(&queue->resp_send_list, struct nvmet_tcp_cmd, entry); if (unlikely(!queue->snd_cmd)) return NULL; } list_del_init(&queue->snd_cmd->entry); queue->send_list_len--; if (nvmet_tcp_need_data_out(queue->snd_cmd)) nvmet_setup_c2h_data_pdu(queue->snd_cmd); else if (nvmet_tcp_need_data_in(queue->snd_cmd)) nvmet_setup_r2t_pdu(queue->snd_cmd); else nvmet_setup_response_pdu(queue->snd_cmd); return queue->snd_cmd; } static void nvmet_tcp_queue_response(struct nvmet_req *req) { struct nvmet_tcp_cmd *cmd = container_of(req, struct nvmet_tcp_cmd, req); struct nvmet_tcp_queue *queue = cmd->queue; struct nvme_sgl_desc *sgl; u32 len; if (unlikely(cmd == queue->cmd)) { sgl = &cmd->req.cmd->common.dptr.sgl; len = le32_to_cpu(sgl->length); /* * Wait for inline data before processing the response. * Avoid using helpers, this might happen before * nvmet_req_init is completed. 
*/ if (queue->rcv_state == NVMET_TCP_RECV_PDU && len && len <= cmd->req.port->inline_data_size && nvme_is_write(cmd->req.cmd)) return; } llist_add(&cmd->lentry, &queue->resp_list); queue_work_on(queue_cpu(queue), nvmet_tcp_wq, &cmd->queue->io_work); } static void nvmet_tcp_execute_request(struct nvmet_tcp_cmd *cmd) { if (unlikely(cmd->flags & NVMET_TCP_F_INIT_FAILED)) nvmet_tcp_queue_response(&cmd->req); else cmd->req.execute(&cmd->req); } static int nvmet_try_send_data_pdu(struct nvmet_tcp_cmd *cmd) { struct msghdr msg = { .msg_flags = MSG_DONTWAIT | MSG_MORE | MSG_SPLICE_PAGES, }; struct bio_vec bvec; u8 hdgst = nvmet_tcp_hdgst_len(cmd->queue); int left = sizeof(*cmd->data_pdu) - cmd->offset + hdgst; int ret; bvec_set_virt(&bvec, (void *)cmd->data_pdu + cmd->offset, left); iov_iter_bvec(&msg.msg_iter, ITER_SOURCE, &bvec, 1, left); ret = sock_sendmsg(cmd->queue->sock, &msg); if (ret <= 0) return ret; cmd->offset += ret; left -= ret; if (left) return -EAGAIN; cmd->state = NVMET_TCP_SEND_DATA; cmd->offset = 0; return 1; } static int nvmet_try_send_data(struct nvmet_tcp_cmd *cmd, bool last_in_batch) { struct nvmet_tcp_queue *queue = cmd->queue; int ret; while (cmd->cur_sg) { struct msghdr msg = { .msg_flags = MSG_DONTWAIT | MSG_SPLICE_PAGES, }; struct page *page = sg_page(cmd->cur_sg); struct bio_vec bvec; u32 left = cmd->cur_sg->length - cmd->offset; if ((!last_in_batch && cmd->queue->send_list_len) || cmd->wbytes_done + left < cmd->req.transfer_len || queue->data_digest || !queue->nvme_sq.sqhd_disabled) msg.msg_flags |= MSG_MORE; bvec_set_page(&bvec, page, left, cmd->offset); iov_iter_bvec(&msg.msg_iter, ITER_SOURCE, &bvec, 1, left); ret = sock_sendmsg(cmd->queue->sock, &msg); if (ret <= 0) return ret; cmd->offset += ret; cmd->wbytes_done += ret; /* Done with sg?*/ if (cmd->offset == cmd->cur_sg->length) { cmd->cur_sg = sg_next(cmd->cur_sg); cmd->offset = 0; } } if (queue->data_digest) { cmd->state = NVMET_TCP_SEND_DDGST; cmd->offset = 0; } else { if (queue->nvme_sq.sqhd_disabled) { cmd->queue->snd_cmd = NULL; nvmet_tcp_put_cmd(cmd); } else { nvmet_setup_response_pdu(cmd); } } if (queue->nvme_sq.sqhd_disabled) nvmet_tcp_free_cmd_buffers(cmd); return 1; } static int nvmet_try_send_response(struct nvmet_tcp_cmd *cmd, bool last_in_batch) { struct msghdr msg = { .msg_flags = MSG_DONTWAIT | MSG_SPLICE_PAGES, }; struct bio_vec bvec; u8 hdgst = nvmet_tcp_hdgst_len(cmd->queue); int left = sizeof(*cmd->rsp_pdu) - cmd->offset + hdgst; int ret; if (!last_in_batch && cmd->queue->send_list_len) msg.msg_flags |= MSG_MORE; else msg.msg_flags |= MSG_EOR; bvec_set_virt(&bvec, (void *)cmd->rsp_pdu + cmd->offset, left); iov_iter_bvec(&msg.msg_iter, ITER_SOURCE, &bvec, 1, left); ret = sock_sendmsg(cmd->queue->sock, &msg); if (ret <= 0) return ret; cmd->offset += ret; left -= ret; if (left) return -EAGAIN; nvmet_tcp_free_cmd_buffers(cmd); cmd->queue->snd_cmd = NULL; nvmet_tcp_put_cmd(cmd); return 1; } static int nvmet_try_send_r2t(struct nvmet_tcp_cmd *cmd, bool last_in_batch) { struct msghdr msg = { .msg_flags = MSG_DONTWAIT | MSG_SPLICE_PAGES, }; struct bio_vec bvec; u8 hdgst = nvmet_tcp_hdgst_len(cmd->queue); int left = sizeof(*cmd->r2t_pdu) - cmd->offset + hdgst; int ret; if (!last_in_batch && cmd->queue->send_list_len) msg.msg_flags |= MSG_MORE; else msg.msg_flags |= MSG_EOR; bvec_set_virt(&bvec, (void *)cmd->r2t_pdu + cmd->offset, left); iov_iter_bvec(&msg.msg_iter, ITER_SOURCE, &bvec, 1, left); ret = sock_sendmsg(cmd->queue->sock, &msg); if (ret <= 0) return ret; cmd->offset += ret; left -= ret; if 
(left) return -EAGAIN; cmd->queue->snd_cmd = NULL; return 1; } static int nvmet_try_send_ddgst(struct nvmet_tcp_cmd *cmd, bool last_in_batch) { struct nvmet_tcp_queue *queue = cmd->queue; int left = NVME_TCP_DIGEST_LENGTH - cmd->offset; struct msghdr msg = { .msg_flags = MSG_DONTWAIT }; struct kvec iov = { .iov_base = (u8 *)&cmd->exp_ddgst + cmd->offset, .iov_len = left }; int ret; if (!last_in_batch && cmd->queue->send_list_len) msg.msg_flags |= MSG_MORE; else msg.msg_flags |= MSG_EOR; ret = kernel_sendmsg(queue->sock, &msg, &iov, 1, iov.iov_len); if (unlikely(ret <= 0)) return ret; cmd->offset += ret; left -= ret; if (left) return -EAGAIN; if (queue->nvme_sq.sqhd_disabled) { cmd->queue->snd_cmd = NULL; nvmet_tcp_put_cmd(cmd); } else { nvmet_setup_response_pdu(cmd); } return 1; } static int nvmet_tcp_try_send_one(struct nvmet_tcp_queue *queue, bool last_in_batch) { struct nvmet_tcp_cmd *cmd = queue->snd_cmd; int ret = 0; if (!cmd || queue->state == NVMET_TCP_Q_DISCONNECTING) { cmd = nvmet_tcp_fetch_cmd(queue); if (unlikely(!cmd)) return 0; } if (cmd->state == NVMET_TCP_SEND_DATA_PDU) { ret = nvmet_try_send_data_pdu(cmd); if (ret <= 0) goto done_send; } if (cmd->state == NVMET_TCP_SEND_DATA) { ret = nvmet_try_send_data(cmd, last_in_batch); if (ret <= 0) goto done_send; } if (cmd->state == NVMET_TCP_SEND_DDGST) { ret = nvmet_try_send_ddgst(cmd, last_in_batch); if (ret <= 0) goto done_send; } if (cmd->state == NVMET_TCP_SEND_R2T) { ret = nvmet_try_send_r2t(cmd, last_in_batch); if (ret <= 0) goto done_send; } if (cmd->state == NVMET_TCP_SEND_RESPONSE) ret = nvmet_try_send_response(cmd, last_in_batch); done_send: if (ret < 0) { if (ret == -EAGAIN) return 0; return ret; } return 1; } static int nvmet_tcp_try_send(struct nvmet_tcp_queue *queue, int budget, int *sends) { int i, ret = 0; for (i = 0; i < budget; i++) { ret = nvmet_tcp_try_send_one(queue, i == budget - 1); if (unlikely(ret < 0)) { nvmet_tcp_socket_error(queue, ret); goto done; } else if (ret == 0) { break; } (*sends)++; } done: return ret; } static void nvmet_prepare_receive_pdu(struct nvmet_tcp_queue *queue) { queue->offset = 0; queue->left = sizeof(struct nvme_tcp_hdr); queue->cmd = NULL; queue->rcv_state = NVMET_TCP_RECV_PDU; } static void nvmet_tcp_free_crypto(struct nvmet_tcp_queue *queue) { struct crypto_ahash *tfm = crypto_ahash_reqtfm(queue->rcv_hash); ahash_request_free(queue->rcv_hash); ahash_request_free(queue->snd_hash); crypto_free_ahash(tfm); } static int nvmet_tcp_alloc_crypto(struct nvmet_tcp_queue *queue) { struct crypto_ahash *tfm; tfm = crypto_alloc_ahash("crc32c", 0, CRYPTO_ALG_ASYNC); if (IS_ERR(tfm)) return PTR_ERR(tfm); queue->snd_hash = ahash_request_alloc(tfm, GFP_KERNEL); if (!queue->snd_hash) goto free_tfm; ahash_request_set_callback(queue->snd_hash, 0, NULL, NULL); queue->rcv_hash = ahash_request_alloc(tfm, GFP_KERNEL); if (!queue->rcv_hash) goto free_snd_hash; ahash_request_set_callback(queue->rcv_hash, 0, NULL, NULL); return 0; free_snd_hash: ahash_request_free(queue->snd_hash); free_tfm: crypto_free_ahash(tfm); return -ENOMEM; } static int nvmet_tcp_handle_icreq(struct nvmet_tcp_queue *queue) { struct nvme_tcp_icreq_pdu *icreq = &queue->pdu.icreq; struct nvme_tcp_icresp_pdu *icresp = &queue->pdu.icresp; struct msghdr msg = {}; struct kvec iov; int ret; if (le32_to_cpu(icreq->hdr.plen) != sizeof(struct nvme_tcp_icreq_pdu)) { pr_err("bad nvme-tcp pdu length (%d)\n", le32_to_cpu(icreq->hdr.plen)); nvmet_tcp_fatal_error(queue); } if (icreq->pfv != NVME_TCP_PFV_1_0) { pr_err("queue %d: bad pfv %d\n", 
queue->idx, icreq->pfv); return -EPROTO; } if (icreq->hpda != 0) { pr_err("queue %d: unsupported hpda %d\n", queue->idx, icreq->hpda); return -EPROTO; } queue->hdr_digest = !!(icreq->digest & NVME_TCP_HDR_DIGEST_ENABLE); queue->data_digest = !!(icreq->digest & NVME_TCP_DATA_DIGEST_ENABLE); if (queue->hdr_digest || queue->data_digest) { ret = nvmet_tcp_alloc_crypto(queue); if (ret) return ret; } memset(icresp, 0, sizeof(*icresp)); icresp->hdr.type = nvme_tcp_icresp; icresp->hdr.hlen = sizeof(*icresp); icresp->hdr.pdo = 0; icresp->hdr.plen = cpu_to_le32(icresp->hdr.hlen); icresp->pfv = cpu_to_le16(NVME_TCP_PFV_1_0); icresp->maxdata = cpu_to_le32(0x400000); /* 16M arbitrary limit */ icresp->cpda = 0; if (queue->hdr_digest) icresp->digest |= NVME_TCP_HDR_DIGEST_ENABLE; if (queue->data_digest) icresp->digest |= NVME_TCP_DATA_DIGEST_ENABLE; iov.iov_base = icresp; iov.iov_len = sizeof(*icresp); ret = kernel_sendmsg(queue->sock, &msg, &iov, 1, iov.iov_len); if (ret < 0) goto free_crypto; queue->state = NVMET_TCP_Q_LIVE; nvmet_prepare_receive_pdu(queue); return 0; free_crypto: if (queue->hdr_digest || queue->data_digest) nvmet_tcp_free_crypto(queue); return ret; } static void nvmet_tcp_handle_req_failure(struct nvmet_tcp_queue *queue, struct nvmet_tcp_cmd *cmd, struct nvmet_req *req) { size_t data_len = le32_to_cpu(req->cmd->common.dptr.sgl.length); int ret; /* * This command has not been processed yet, hence we are trying to * figure out if there is still pending data left to receive. If * we don't, we can simply prepare for the next pdu and bail out, * otherwise we will need to prepare a buffer and receive the * stale data before continuing forward. */ if (!nvme_is_write(cmd->req.cmd) || !data_len || data_len > cmd->req.port->inline_data_size) { nvmet_prepare_receive_pdu(queue); return; } ret = nvmet_tcp_map_data(cmd); if (unlikely(ret)) { pr_err("queue %d: failed to map data\n", queue->idx); nvmet_tcp_fatal_error(queue); return; } queue->rcv_state = NVMET_TCP_RECV_DATA; nvmet_tcp_build_pdu_iovec(cmd); cmd->flags |= NVMET_TCP_F_INIT_FAILED; } static int nvmet_tcp_handle_h2c_data_pdu(struct nvmet_tcp_queue *queue) { struct nvme_tcp_data_pdu *data = &queue->pdu.data; struct nvmet_tcp_cmd *cmd; if (likely(queue->nr_cmds)) { if (unlikely(data->ttag >= queue->nr_cmds)) { pr_err("queue %d: received out of bound ttag %u, nr_cmds %u\n", queue->idx, data->ttag, queue->nr_cmds); nvmet_tcp_fatal_error(queue); return -EPROTO; } cmd = &queue->cmds[data->ttag]; } else { cmd = &queue->connect; } if (le32_to_cpu(data->data_offset) != cmd->rbytes_done) { pr_err("ttag %u unexpected data offset %u (expected %u)\n", data->ttag, le32_to_cpu(data->data_offset), cmd->rbytes_done); /* FIXME: use path and transport errors */ nvmet_req_complete(&cmd->req, NVME_SC_INVALID_FIELD | NVME_SC_DNR); return -EPROTO; } cmd->pdu_len = le32_to_cpu(data->data_length); cmd->pdu_recv = 0; nvmet_tcp_build_pdu_iovec(cmd); queue->cmd = cmd; queue->rcv_state = NVMET_TCP_RECV_DATA; return 0; } static int nvmet_tcp_done_recv_pdu(struct nvmet_tcp_queue *queue) { struct nvme_tcp_hdr *hdr = &queue->pdu.cmd.hdr; struct nvme_command *nvme_cmd = &queue->pdu.cmd.cmd; struct nvmet_req *req; int ret; if (unlikely(queue->state == NVMET_TCP_Q_CONNECTING)) { if (hdr->type != nvme_tcp_icreq) { pr_err("unexpected pdu type (%d) before icreq\n", hdr->type); nvmet_tcp_fatal_error(queue); return -EPROTO; } return nvmet_tcp_handle_icreq(queue); } if (unlikely(hdr->type == nvme_tcp_icreq)) { pr_err("queue %d: received icreq pdu in state %d\n", queue->idx, 
queue->state); nvmet_tcp_fatal_error(queue); return -EPROTO; } if (hdr->type == nvme_tcp_h2c_data) { ret = nvmet_tcp_handle_h2c_data_pdu(queue); if (unlikely(ret)) return ret; return 0; } queue->cmd = nvmet_tcp_get_cmd(queue); if (unlikely(!queue->cmd)) { /* This should never happen */ pr_err("queue %d: out of commands (%d) send_list_len: %d, opcode: %d", queue->idx, queue->nr_cmds, queue->send_list_len, nvme_cmd->common.opcode); nvmet_tcp_fatal_error(queue); return -ENOMEM; } req = &queue->cmd->req; memcpy(req->cmd, nvme_cmd, sizeof(*nvme_cmd)); if (unlikely(!nvmet_req_init(req, &queue->nvme_cq, &queue->nvme_sq, &nvmet_tcp_ops))) { pr_err("failed cmd %p id %d opcode %d, data_len: %d\n", req->cmd, req->cmd->common.command_id, req->cmd->common.opcode, le32_to_cpu(req->cmd->common.dptr.sgl.length)); nvmet_tcp_handle_req_failure(queue, queue->cmd, req); return 0; } ret = nvmet_tcp_map_data(queue->cmd); if (unlikely(ret)) { pr_err("queue %d: failed to map data\n", queue->idx); if (nvmet_tcp_has_inline_data(queue->cmd)) nvmet_tcp_fatal_error(queue); else nvmet_req_complete(req, ret); ret = -EAGAIN; goto out; } if (nvmet_tcp_need_data_in(queue->cmd)) { if (nvmet_tcp_has_inline_data(queue->cmd)) { queue->rcv_state = NVMET_TCP_RECV_DATA; nvmet_tcp_build_pdu_iovec(queue->cmd); return 0; } /* send back R2T */ nvmet_tcp_queue_response(&queue->cmd->req); goto out; } queue->cmd->req.execute(&queue->cmd->req); out: nvmet_prepare_receive_pdu(queue); return ret; } static const u8 nvme_tcp_pdu_sizes[] = { [nvme_tcp_icreq] = sizeof(struct nvme_tcp_icreq_pdu), [nvme_tcp_cmd] = sizeof(struct nvme_tcp_cmd_pdu), [nvme_tcp_h2c_data] = sizeof(struct nvme_tcp_data_pdu), }; static inline u8 nvmet_tcp_pdu_size(u8 type) { size_t idx = type; return (idx < ARRAY_SIZE(nvme_tcp_pdu_sizes) && nvme_tcp_pdu_sizes[idx]) ? 
nvme_tcp_pdu_sizes[idx] : 0; } static inline bool nvmet_tcp_pdu_valid(u8 type) { switch (type) { case nvme_tcp_icreq: case nvme_tcp_cmd: case nvme_tcp_h2c_data: /* fallthru */ return true; } return false; } static int nvmet_tcp_try_recv_pdu(struct nvmet_tcp_queue *queue) { struct nvme_tcp_hdr *hdr = &queue->pdu.cmd.hdr; int len; struct kvec iov; struct msghdr msg = { .msg_flags = MSG_DONTWAIT }; recv: iov.iov_base = (void *)&queue->pdu + queue->offset; iov.iov_len = queue->left; len = kernel_recvmsg(queue->sock, &msg, &iov, 1, iov.iov_len, msg.msg_flags); if (unlikely(len < 0)) return len; queue->offset += len; queue->left -= len; if (queue->left) return -EAGAIN; if (queue->offset == sizeof(struct nvme_tcp_hdr)) { u8 hdgst = nvmet_tcp_hdgst_len(queue); if (unlikely(!nvmet_tcp_pdu_valid(hdr->type))) { pr_err("unexpected pdu type %d\n", hdr->type); nvmet_tcp_fatal_error(queue); return -EIO; } if (unlikely(hdr->hlen != nvmet_tcp_pdu_size(hdr->type))) { pr_err("pdu %d bad hlen %d\n", hdr->type, hdr->hlen); return -EIO; } queue->left = hdr->hlen - queue->offset + hdgst; goto recv; } if (queue->hdr_digest && nvmet_tcp_verify_hdgst(queue, &queue->pdu, hdr->hlen)) { nvmet_tcp_fatal_error(queue); /* fatal */ return -EPROTO; } if (queue->data_digest && nvmet_tcp_check_ddgst(queue, &queue->pdu)) { nvmet_tcp_fatal_error(queue); /* fatal */ return -EPROTO; } return nvmet_tcp_done_recv_pdu(queue); } static void nvmet_tcp_prep_recv_ddgst(struct nvmet_tcp_cmd *cmd) { struct nvmet_tcp_queue *queue = cmd->queue; nvmet_tcp_calc_ddgst(queue->rcv_hash, cmd); queue->offset = 0; queue->left = NVME_TCP_DIGEST_LENGTH; queue->rcv_state = NVMET_TCP_RECV_DDGST; } static int nvmet_tcp_try_recv_data(struct nvmet_tcp_queue *queue) { struct nvmet_tcp_cmd *cmd = queue->cmd; int ret; while (msg_data_left(&cmd->recv_msg)) { ret = sock_recvmsg(cmd->queue->sock, &cmd->recv_msg, cmd->recv_msg.msg_flags); if (ret <= 0) return ret; cmd->pdu_recv += ret; cmd->rbytes_done += ret; } if (queue->data_digest) { nvmet_tcp_prep_recv_ddgst(cmd); return 0; } if (cmd->rbytes_done == cmd->req.transfer_len) nvmet_tcp_execute_request(cmd); nvmet_prepare_receive_pdu(queue); return 0; } static int nvmet_tcp_try_recv_ddgst(struct nvmet_tcp_queue *queue) { struct nvmet_tcp_cmd *cmd = queue->cmd; int ret; struct msghdr msg = { .msg_flags = MSG_DONTWAIT }; struct kvec iov = { .iov_base = (void *)&cmd->recv_ddgst + queue->offset, .iov_len = queue->left }; ret = kernel_recvmsg(queue->sock, &msg, &iov, 1, iov.iov_len, msg.msg_flags); if (unlikely(ret < 0)) return ret; queue->offset += ret; queue->left -= ret; if (queue->left) return -EAGAIN; if (queue->data_digest && cmd->exp_ddgst != cmd->recv_ddgst) { pr_err("queue %d: cmd %d pdu (%d) data digest error: recv %#x expected %#x\n", queue->idx, cmd->req.cmd->common.command_id, queue->pdu.cmd.hdr.type, le32_to_cpu(cmd->recv_ddgst), le32_to_cpu(cmd->exp_ddgst)); nvmet_req_uninit(&cmd->req); nvmet_tcp_free_cmd_buffers(cmd); nvmet_tcp_fatal_error(queue); ret = -EPROTO; goto out; } if (cmd->rbytes_done == cmd->req.transfer_len) nvmet_tcp_execute_request(cmd); ret = 0; out: nvmet_prepare_receive_pdu(queue); return ret; } static int nvmet_tcp_try_recv_one(struct nvmet_tcp_queue *queue) { int result = 0; if (unlikely(queue->rcv_state == NVMET_TCP_RECV_ERR)) return 0; if (queue->rcv_state == NVMET_TCP_RECV_PDU) { result = nvmet_tcp_try_recv_pdu(queue); if (result != 0) goto done_recv; } if (queue->rcv_state == NVMET_TCP_RECV_DATA) { result = nvmet_tcp_try_recv_data(queue); if (result != 0) goto done_recv; } if 
(queue->rcv_state == NVMET_TCP_RECV_DDGST) { result = nvmet_tcp_try_recv_ddgst(queue); if (result != 0) goto done_recv; } done_recv: if (result < 0) { if (result == -EAGAIN) return 0; return result; } return 1; } static int nvmet_tcp_try_recv(struct nvmet_tcp_queue *queue, int budget, int *recvs) { int i, ret = 0; for (i = 0; i < budget; i++) { ret = nvmet_tcp_try_recv_one(queue); if (unlikely(ret < 0)) { nvmet_tcp_socket_error(queue, ret); goto done; } else if (ret == 0) { break; } (*recvs)++; } done: return ret; } static void nvmet_tcp_schedule_release_queue(struct nvmet_tcp_queue *queue) { spin_lock(&queue->state_lock); if (queue->state != NVMET_TCP_Q_DISCONNECTING) { queue->state = NVMET_TCP_Q_DISCONNECTING; queue_work(nvmet_wq, &queue->release_work); } spin_unlock(&queue->state_lock); } static inline void nvmet_tcp_arm_queue_deadline(struct nvmet_tcp_queue *queue) { queue->poll_end = jiffies + usecs_to_jiffies(idle_poll_period_usecs); } static bool nvmet_tcp_check_queue_deadline(struct nvmet_tcp_queue *queue, int ops) { if (!idle_poll_period_usecs) return false; if (ops) nvmet_tcp_arm_queue_deadline(queue); return !time_after(jiffies, queue->poll_end); } static void nvmet_tcp_io_work(struct work_struct *w) { struct nvmet_tcp_queue *queue = container_of(w, struct nvmet_tcp_queue, io_work); bool pending; int ret, ops = 0; do { pending = false; ret = nvmet_tcp_try_recv(queue, NVMET_TCP_RECV_BUDGET, &ops); if (ret > 0) pending = true; else if (ret < 0) return; ret = nvmet_tcp_try_send(queue, NVMET_TCP_SEND_BUDGET, &ops); if (ret > 0) pending = true; else if (ret < 0) return; } while (pending && ops < NVMET_TCP_IO_WORK_BUDGET); /* * Requeue the worker if idle deadline period is in progress or any * ops activity was recorded during the do-while loop above. 
*/ if (nvmet_tcp_check_queue_deadline(queue, ops) || pending) queue_work_on(queue_cpu(queue), nvmet_tcp_wq, &queue->io_work); } static int nvmet_tcp_alloc_cmd(struct nvmet_tcp_queue *queue, struct nvmet_tcp_cmd *c) { u8 hdgst = nvmet_tcp_hdgst_len(queue); c->queue = queue; c->req.port = queue->port->nport; c->cmd_pdu = page_frag_alloc(&queue->pf_cache, sizeof(*c->cmd_pdu) + hdgst, GFP_KERNEL | __GFP_ZERO); if (!c->cmd_pdu) return -ENOMEM; c->req.cmd = &c->cmd_pdu->cmd; c->rsp_pdu = page_frag_alloc(&queue->pf_cache, sizeof(*c->rsp_pdu) + hdgst, GFP_KERNEL | __GFP_ZERO); if (!c->rsp_pdu) goto out_free_cmd; c->req.cqe = &c->rsp_pdu->cqe; c->data_pdu = page_frag_alloc(&queue->pf_cache, sizeof(*c->data_pdu) + hdgst, GFP_KERNEL | __GFP_ZERO); if (!c->data_pdu) goto out_free_rsp; c->r2t_pdu = page_frag_alloc(&queue->pf_cache, sizeof(*c->r2t_pdu) + hdgst, GFP_KERNEL | __GFP_ZERO); if (!c->r2t_pdu) goto out_free_data; c->recv_msg.msg_flags = MSG_DONTWAIT | MSG_NOSIGNAL; list_add_tail(&c->entry, &queue->free_list); return 0; out_free_data: page_frag_free(c->data_pdu); out_free_rsp: page_frag_free(c->rsp_pdu); out_free_cmd: page_frag_free(c->cmd_pdu); return -ENOMEM; } static void nvmet_tcp_free_cmd(struct nvmet_tcp_cmd *c) { page_frag_free(c->r2t_pdu); page_frag_free(c->data_pdu); page_frag_free(c->rsp_pdu); page_frag_free(c->cmd_pdu); } static int nvmet_tcp_alloc_cmds(struct nvmet_tcp_queue *queue) { struct nvmet_tcp_cmd *cmds; int i, ret = -EINVAL, nr_cmds = queue->nr_cmds; cmds = kcalloc(nr_cmds, sizeof(struct nvmet_tcp_cmd), GFP_KERNEL); if (!cmds) goto out; for (i = 0; i < nr_cmds; i++) { ret = nvmet_tcp_alloc_cmd(queue, cmds + i); if (ret) goto out_free; } queue->cmds = cmds; return 0; out_free: while (--i >= 0) nvmet_tcp_free_cmd(cmds + i); kfree(cmds); out: return ret; } static void nvmet_tcp_free_cmds(struct nvmet_tcp_queue *queue) { struct nvmet_tcp_cmd *cmds = queue->cmds; int i; for (i = 0; i < queue->nr_cmds; i++) nvmet_tcp_free_cmd(cmds + i); nvmet_tcp_free_cmd(&queue->connect); kfree(cmds); } static void nvmet_tcp_restore_socket_callbacks(struct nvmet_tcp_queue *queue) { struct socket *sock = queue->sock; write_lock_bh(&sock->sk->sk_callback_lock); sock->sk->sk_data_ready = queue->data_ready; sock->sk->sk_state_change = queue->state_change; sock->sk->sk_write_space = queue->write_space; sock->sk->sk_user_data = NULL; write_unlock_bh(&sock->sk->sk_callback_lock); } static void nvmet_tcp_uninit_data_in_cmds(struct nvmet_tcp_queue *queue) { struct nvmet_tcp_cmd *cmd = queue->cmds; int i; for (i = 0; i < queue->nr_cmds; i++, cmd++) { if (nvmet_tcp_need_data_in(cmd)) nvmet_req_uninit(&cmd->req); } if (!queue->nr_cmds && nvmet_tcp_need_data_in(&queue->connect)) { /* failed in connect */ nvmet_req_uninit(&queue->connect.req); } } static void nvmet_tcp_free_cmd_data_in_buffers(struct nvmet_tcp_queue *queue) { struct nvmet_tcp_cmd *cmd = queue->cmds; int i; for (i = 0; i < queue->nr_cmds; i++, cmd++) { if (nvmet_tcp_need_data_in(cmd)) nvmet_tcp_free_cmd_buffers(cmd); } if (!queue->nr_cmds && nvmet_tcp_need_data_in(&queue->connect)) nvmet_tcp_free_cmd_buffers(&queue->connect); } static void nvmet_tcp_release_queue_work(struct work_struct *w) { struct page *page; struct nvmet_tcp_queue *queue = container_of(w, struct nvmet_tcp_queue, release_work); mutex_lock(&nvmet_tcp_queue_mutex); list_del_init(&queue->queue_list); mutex_unlock(&nvmet_tcp_queue_mutex); nvmet_tcp_restore_socket_callbacks(queue); cancel_work_sync(&queue->io_work); /* stop accepting incoming data */ queue->rcv_state = 
NVMET_TCP_RECV_ERR; nvmet_tcp_uninit_data_in_cmds(queue); nvmet_sq_destroy(&queue->nvme_sq); cancel_work_sync(&queue->io_work); nvmet_tcp_free_cmd_data_in_buffers(queue); sock_release(queue->sock); nvmet_tcp_free_cmds(queue); if (queue->hdr_digest || queue->data_digest) nvmet_tcp_free_crypto(queue); ida_free(&nvmet_tcp_queue_ida, queue->idx); page = virt_to_head_page(queue->pf_cache.va); __page_frag_cache_drain(page, queue->pf_cache.pagecnt_bias); kfree(queue); } static void nvmet_tcp_data_ready(struct sock *sk) { struct nvmet_tcp_queue *queue; trace_sk_data_ready(sk); read_lock_bh(&sk->sk_callback_lock); queue = sk->sk_user_data; if (likely(queue)) queue_work_on(queue_cpu(queue), nvmet_tcp_wq, &queue->io_work); read_unlock_bh(&sk->sk_callback_lock); } static void nvmet_tcp_write_space(struct sock *sk) { struct nvmet_tcp_queue *queue; read_lock_bh(&sk->sk_callback_lock); queue = sk->sk_user_data; if (unlikely(!queue)) goto out; if (unlikely(queue->state == NVMET_TCP_Q_CONNECTING)) { queue->write_space(sk); goto out; } if (sk_stream_is_writeable(sk)) { clear_bit(SOCK_NOSPACE, &sk->sk_socket->flags); queue_work_on(queue_cpu(queue), nvmet_tcp_wq, &queue->io_work); } out: read_unlock_bh(&sk->sk_callback_lock); } static void nvmet_tcp_state_change(struct sock *sk) { struct nvmet_tcp_queue *queue; read_lock_bh(&sk->sk_callback_lock); queue = sk->sk_user_data; if (!queue) goto done; switch (sk->sk_state) { case TCP_FIN_WAIT2: case TCP_LAST_ACK: break; case TCP_FIN_WAIT1: case TCP_CLOSE_WAIT: case TCP_CLOSE: /* FALLTHRU */ nvmet_tcp_schedule_release_queue(queue); break; default: pr_warn("queue %d unhandled state %d\n", queue->idx, sk->sk_state); } done: read_unlock_bh(&sk->sk_callback_lock); } static int nvmet_tcp_set_queue_sock(struct nvmet_tcp_queue *queue) { struct socket *sock = queue->sock; struct inet_sock *inet = inet_sk(sock->sk); int ret; ret = kernel_getsockname(sock, (struct sockaddr *)&queue->sockaddr); if (ret < 0) return ret; ret = kernel_getpeername(sock, (struct sockaddr *)&queue->sockaddr_peer); if (ret < 0) return ret; /* * Cleanup whatever is sitting in the TCP transmit queue on socket * close. This is done to prevent stale data from being sent should * the network connection be restored before TCP times out. 
*/ sock_no_linger(sock->sk); if (so_priority > 0) sock_set_priority(sock->sk, so_priority); /* Set socket type of service */ if (inet->rcv_tos > 0) ip_sock_set_tos(sock->sk, inet->rcv_tos); ret = 0; write_lock_bh(&sock->sk->sk_callback_lock); if (sock->sk->sk_state != TCP_ESTABLISHED) { /* * If the socket is already closing, don't even start * consuming it */ ret = -ENOTCONN; } else { sock->sk->sk_user_data = queue; queue->data_ready = sock->sk->sk_data_ready; sock->sk->sk_data_ready = nvmet_tcp_data_ready; queue->state_change = sock->sk->sk_state_change; sock->sk->sk_state_change = nvmet_tcp_state_change; queue->write_space = sock->sk->sk_write_space; sock->sk->sk_write_space = nvmet_tcp_write_space; if (idle_poll_period_usecs) nvmet_tcp_arm_queue_deadline(queue); queue_work_on(queue_cpu(queue), nvmet_tcp_wq, &queue->io_work); } write_unlock_bh(&sock->sk->sk_callback_lock); return ret; } static int nvmet_tcp_alloc_queue(struct nvmet_tcp_port *port, struct socket *newsock) { struct nvmet_tcp_queue *queue; int ret; queue = kzalloc(sizeof(*queue), GFP_KERNEL); if (!queue) return -ENOMEM; INIT_WORK(&queue->release_work, nvmet_tcp_release_queue_work); INIT_WORK(&queue->io_work, nvmet_tcp_io_work); queue->sock = newsock; queue->port = port; queue->nr_cmds = 0; spin_lock_init(&queue->state_lock); queue->state = NVMET_TCP_Q_CONNECTING; INIT_LIST_HEAD(&queue->free_list); init_llist_head(&queue->resp_list); INIT_LIST_HEAD(&queue->resp_send_list); queue->idx = ida_alloc(&nvmet_tcp_queue_ida, GFP_KERNEL); if (queue->idx < 0) { ret = queue->idx; goto out_free_queue; } ret = nvmet_tcp_alloc_cmd(queue, &queue->connect); if (ret) goto out_ida_remove; ret = nvmet_sq_init(&queue->nvme_sq); if (ret) goto out_free_connect; nvmet_prepare_receive_pdu(queue); mutex_lock(&nvmet_tcp_queue_mutex); list_add_tail(&queue->queue_list, &nvmet_tcp_queue_list); mutex_unlock(&nvmet_tcp_queue_mutex); ret = nvmet_tcp_set_queue_sock(queue); if (ret) goto out_destroy_sq; return 0; out_destroy_sq: mutex_lock(&nvmet_tcp_queue_mutex); list_del_init(&queue->queue_list); mutex_unlock(&nvmet_tcp_queue_mutex); nvmet_sq_destroy(&queue->nvme_sq); out_free_connect: nvmet_tcp_free_cmd(&queue->connect); out_ida_remove: ida_free(&nvmet_tcp_queue_ida, queue->idx); out_free_queue: kfree(queue); return ret; } static void nvmet_tcp_accept_work(struct work_struct *w) { struct nvmet_tcp_port *port = container_of(w, struct nvmet_tcp_port, accept_work); struct socket *newsock; int ret; while (true) { ret = kernel_accept(port->sock, &newsock, O_NONBLOCK); if (ret < 0) { if (ret != -EAGAIN) pr_warn("failed to accept err=%d\n", ret); return; } ret = nvmet_tcp_alloc_queue(port, newsock); if (ret) { pr_err("failed to allocate queue\n"); sock_release(newsock); } } } static void nvmet_tcp_listen_data_ready(struct sock *sk) { struct nvmet_tcp_port *port; trace_sk_data_ready(sk); read_lock_bh(&sk->sk_callback_lock); port = sk->sk_user_data; if (!port) goto out; if (sk->sk_state == TCP_LISTEN) queue_work(nvmet_wq, &port->accept_work); out: read_unlock_bh(&sk->sk_callback_lock); } static int nvmet_tcp_add_port(struct nvmet_port *nport) { struct nvmet_tcp_port *port; __kernel_sa_family_t af; int ret; port = kzalloc(sizeof(*port), GFP_KERNEL); if (!port) return -ENOMEM; switch (nport->disc_addr.adrfam) { case NVMF_ADDR_FAMILY_IP4: af = AF_INET; break; case NVMF_ADDR_FAMILY_IP6: af = AF_INET6; break; default: pr_err("address family %d not supported\n", nport->disc_addr.adrfam); ret = -EINVAL; goto err_port; } ret = inet_pton_with_scope(&init_net, af, 
nport->disc_addr.traddr, nport->disc_addr.trsvcid, &port->addr); if (ret) { pr_err("malformed ip/port passed: %s:%s\n", nport->disc_addr.traddr, nport->disc_addr.trsvcid); goto err_port; } port->nport = nport; INIT_WORK(&port->accept_work, nvmet_tcp_accept_work); if (port->nport->inline_data_size < 0) port->nport->inline_data_size = NVMET_TCP_DEF_INLINE_DATA_SIZE; ret = sock_create(port->addr.ss_family, SOCK_STREAM, IPPROTO_TCP, &port->sock); if (ret) { pr_err("failed to create a socket\n"); goto err_port; } port->sock->sk->sk_user_data = port; port->data_ready = port->sock->sk->sk_data_ready; port->sock->sk->sk_data_ready = nvmet_tcp_listen_data_ready; sock_set_reuseaddr(port->sock->sk); tcp_sock_set_nodelay(port->sock->sk); if (so_priority > 0) sock_set_priority(port->sock->sk, so_priority); ret = kernel_bind(port->sock, (struct sockaddr *)&port->addr, sizeof(port->addr)); if (ret) { pr_err("failed to bind port socket %d\n", ret); goto err_sock; } ret = kernel_listen(port->sock, 128); if (ret) { pr_err("failed to listen %d on port sock\n", ret); goto err_sock; } nport->priv = port; pr_info("enabling port %d (%pISpc)\n", le16_to_cpu(nport->disc_addr.portid), &port->addr); return 0; err_sock: sock_release(port->sock); err_port: kfree(port); return ret; } static void nvmet_tcp_destroy_port_queues(struct nvmet_tcp_port *port) { struct nvmet_tcp_queue *queue; mutex_lock(&nvmet_tcp_queue_mutex); list_for_each_entry(queue, &nvmet_tcp_queue_list, queue_list) if (queue->port == port) kernel_sock_shutdown(queue->sock, SHUT_RDWR); mutex_unlock(&nvmet_tcp_queue_mutex); } static void nvmet_tcp_remove_port(struct nvmet_port *nport) { struct nvmet_tcp_port *port = nport->priv; write_lock_bh(&port->sock->sk->sk_callback_lock); port->sock->sk->sk_data_ready = port->data_ready; port->sock->sk->sk_user_data = NULL; write_unlock_bh(&port->sock->sk->sk_callback_lock); cancel_work_sync(&port->accept_work); /* * Destroy the remaining queues, which are not belong to any * controller yet. 
*/ nvmet_tcp_destroy_port_queues(port); sock_release(port->sock); kfree(port); } static void nvmet_tcp_delete_ctrl(struct nvmet_ctrl *ctrl) { struct nvmet_tcp_queue *queue; mutex_lock(&nvmet_tcp_queue_mutex); list_for_each_entry(queue, &nvmet_tcp_queue_list, queue_list) if (queue->nvme_sq.ctrl == ctrl) kernel_sock_shutdown(queue->sock, SHUT_RDWR); mutex_unlock(&nvmet_tcp_queue_mutex); } static u16 nvmet_tcp_install_queue(struct nvmet_sq *sq) { struct nvmet_tcp_queue *queue = container_of(sq, struct nvmet_tcp_queue, nvme_sq); if (sq->qid == 0) { /* Let inflight controller teardown complete */ flush_workqueue(nvmet_wq); } queue->nr_cmds = sq->size * 2; if (nvmet_tcp_alloc_cmds(queue)) return NVME_SC_INTERNAL; return 0; } static void nvmet_tcp_disc_port_addr(struct nvmet_req *req, struct nvmet_port *nport, char *traddr) { struct nvmet_tcp_port *port = nport->priv; if (inet_addr_is_any((struct sockaddr *)&port->addr)) { struct nvmet_tcp_cmd *cmd = container_of(req, struct nvmet_tcp_cmd, req); struct nvmet_tcp_queue *queue = cmd->queue; sprintf(traddr, "%pISc", (struct sockaddr *)&queue->sockaddr); } else { memcpy(traddr, nport->disc_addr.traddr, NVMF_TRADDR_SIZE); } } static const struct nvmet_fabrics_ops nvmet_tcp_ops = { .owner = THIS_MODULE, .type = NVMF_TRTYPE_TCP, .msdbd = 1, .add_port = nvmet_tcp_add_port, .remove_port = nvmet_tcp_remove_port, .queue_response = nvmet_tcp_queue_response, .delete_ctrl = nvmet_tcp_delete_ctrl, .install_queue = nvmet_tcp_install_queue, .disc_traddr = nvmet_tcp_disc_port_addr, }; static int __init nvmet_tcp_init(void) { int ret; nvmet_tcp_wq = alloc_workqueue("nvmet_tcp_wq", WQ_MEM_RECLAIM | WQ_HIGHPRI, 0); if (!nvmet_tcp_wq) return -ENOMEM; ret = nvmet_register_transport(&nvmet_tcp_ops); if (ret) goto err; return 0; err: destroy_workqueue(nvmet_tcp_wq); return ret; } static void __exit nvmet_tcp_exit(void) { struct nvmet_tcp_queue *queue; nvmet_unregister_transport(&nvmet_tcp_ops); flush_workqueue(nvmet_wq); mutex_lock(&nvmet_tcp_queue_mutex); list_for_each_entry(queue, &nvmet_tcp_queue_list, queue_list) kernel_sock_shutdown(queue->sock, SHUT_RDWR); mutex_unlock(&nvmet_tcp_queue_mutex); flush_workqueue(nvmet_wq); destroy_workqueue(nvmet_tcp_wq); } module_init(nvmet_tcp_init); module_exit(nvmet_tcp_exit); MODULE_LICENSE("GPL v2"); MODULE_ALIAS("nvmet-transport-3"); /* 3 == NVMF_TRTYPE_TCP */
linux-master
drivers/nvme/target/tcp.c
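As a standalone illustration of the callback handling in the tcp.c record above: nvmet_tcp_set_queue_sock() saves the socket's original sk_data_ready, sk_state_change and sk_write_space pointers and installs its own handlers under sk_callback_lock, while nvmet_tcp_add_port()/nvmet_tcp_remove_port() do the same for the listening socket's data_ready and restore it on removal. Below is a minimal sketch of that save/override/restore pattern, assuming made-up fake_sock and fake_queue types in place of the real kernel structures; it is an analogy, not kernel API.

/* Minimal sketch, assuming hypothetical fake_sock/fake_queue types. */
#include <stdio.h>

struct fake_sock {
	void (*data_ready)(struct fake_sock *sk);
	void *user_data;
};

struct fake_queue {
	struct fake_sock *sock;
	void (*saved_data_ready)(struct fake_sock *sk);
};

static void default_data_ready(struct fake_sock *sk)
{
	(void)sk;
	printf("default data_ready\n");
}

static void queue_data_ready(struct fake_sock *sk)
{
	struct fake_queue *queue = sk->user_data;

	printf("queue data_ready, queue=%p\n", (void *)queue);
}

static void install_callbacks(struct fake_queue *queue, struct fake_sock *sk)
{
	queue->sock = sk;
	queue->saved_data_ready = sk->data_ready;	/* remember the original */
	sk->user_data = queue;
	sk->data_ready = queue_data_ready;		/* override */
}

static void restore_callbacks(struct fake_queue *queue)
{
	struct fake_sock *sk = queue->sock;

	sk->data_ready = queue->saved_data_ready;	/* put the original back */
	sk->user_data = NULL;
}

int main(void)
{
	struct fake_sock sk = { .data_ready = default_data_ready };
	struct fake_queue queue = { 0 };

	install_callbacks(&queue, &sk);
	sk.data_ready(&sk);		/* routed to the queue handler */
	restore_callbacks(&queue);
	sk.data_ready(&sk);		/* back to the default handler */
	return 0;
}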
// SPDX-License-Identifier: GPL-2.0 /* * Copyright (c) 2016 Avago Technologies. All rights reserved. */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/module.h> #include <linux/parser.h> #include <uapi/scsi/fc/fc_fs.h> #include "../host/nvme.h" #include "../target/nvmet.h" #include <linux/nvme-fc-driver.h> #include <linux/nvme-fc.h> enum { NVMF_OPT_ERR = 0, NVMF_OPT_WWNN = 1 << 0, NVMF_OPT_WWPN = 1 << 1, NVMF_OPT_ROLES = 1 << 2, NVMF_OPT_FCADDR = 1 << 3, NVMF_OPT_LPWWNN = 1 << 4, NVMF_OPT_LPWWPN = 1 << 5, }; struct fcloop_ctrl_options { int mask; u64 wwnn; u64 wwpn; u32 roles; u32 fcaddr; u64 lpwwnn; u64 lpwwpn; }; static const match_table_t opt_tokens = { { NVMF_OPT_WWNN, "wwnn=%s" }, { NVMF_OPT_WWPN, "wwpn=%s" }, { NVMF_OPT_ROLES, "roles=%d" }, { NVMF_OPT_FCADDR, "fcaddr=%x" }, { NVMF_OPT_LPWWNN, "lpwwnn=%s" }, { NVMF_OPT_LPWWPN, "lpwwpn=%s" }, { NVMF_OPT_ERR, NULL } }; static int fcloop_verify_addr(substring_t *s) { size_t blen = s->to - s->from + 1; if (strnlen(s->from, blen) != NVME_FC_TRADDR_HEXNAMELEN + 2 || strncmp(s->from, "0x", 2)) return -EINVAL; return 0; } static int fcloop_parse_options(struct fcloop_ctrl_options *opts, const char *buf) { substring_t args[MAX_OPT_ARGS]; char *options, *o, *p; int token, ret = 0; u64 token64; options = o = kstrdup(buf, GFP_KERNEL); if (!options) return -ENOMEM; while ((p = strsep(&o, ",\n")) != NULL) { if (!*p) continue; token = match_token(p, opt_tokens, args); opts->mask |= token; switch (token) { case NVMF_OPT_WWNN: if (fcloop_verify_addr(args) || match_u64(args, &token64)) { ret = -EINVAL; goto out_free_options; } opts->wwnn = token64; break; case NVMF_OPT_WWPN: if (fcloop_verify_addr(args) || match_u64(args, &token64)) { ret = -EINVAL; goto out_free_options; } opts->wwpn = token64; break; case NVMF_OPT_ROLES: if (match_int(args, &token)) { ret = -EINVAL; goto out_free_options; } opts->roles = token; break; case NVMF_OPT_FCADDR: if (match_hex(args, &token)) { ret = -EINVAL; goto out_free_options; } opts->fcaddr = token; break; case NVMF_OPT_LPWWNN: if (fcloop_verify_addr(args) || match_u64(args, &token64)) { ret = -EINVAL; goto out_free_options; } opts->lpwwnn = token64; break; case NVMF_OPT_LPWWPN: if (fcloop_verify_addr(args) || match_u64(args, &token64)) { ret = -EINVAL; goto out_free_options; } opts->lpwwpn = token64; break; default: pr_warn("unknown parameter or missing value '%s'\n", p); ret = -EINVAL; goto out_free_options; } } out_free_options: kfree(options); return ret; } static int fcloop_parse_nm_options(struct device *dev, u64 *nname, u64 *pname, const char *buf) { substring_t args[MAX_OPT_ARGS]; char *options, *o, *p; int token, ret = 0; u64 token64; *nname = -1; *pname = -1; options = o = kstrdup(buf, GFP_KERNEL); if (!options) return -ENOMEM; while ((p = strsep(&o, ",\n")) != NULL) { if (!*p) continue; token = match_token(p, opt_tokens, args); switch (token) { case NVMF_OPT_WWNN: if (fcloop_verify_addr(args) || match_u64(args, &token64)) { ret = -EINVAL; goto out_free_options; } *nname = token64; break; case NVMF_OPT_WWPN: if (fcloop_verify_addr(args) || match_u64(args, &token64)) { ret = -EINVAL; goto out_free_options; } *pname = token64; break; default: pr_warn("unknown parameter or missing value '%s'\n", p); ret = -EINVAL; goto out_free_options; } } out_free_options: kfree(options); if (!ret) { if (*nname == -1) return -EINVAL; if (*pname == -1) return -EINVAL; } return ret; } #define LPORT_OPTS (NVMF_OPT_WWNN | NVMF_OPT_WWPN) #define RPORT_OPTS (NVMF_OPT_WWNN | NVMF_OPT_WWPN | \ NVMF_OPT_LPWWNN | 
NVMF_OPT_LPWWPN) #define TGTPORT_OPTS (NVMF_OPT_WWNN | NVMF_OPT_WWPN) static DEFINE_SPINLOCK(fcloop_lock); static LIST_HEAD(fcloop_lports); static LIST_HEAD(fcloop_nports); struct fcloop_lport { struct nvme_fc_local_port *localport; struct list_head lport_list; struct completion unreg_done; }; struct fcloop_lport_priv { struct fcloop_lport *lport; }; struct fcloop_rport { struct nvme_fc_remote_port *remoteport; struct nvmet_fc_target_port *targetport; struct fcloop_nport *nport; struct fcloop_lport *lport; spinlock_t lock; struct list_head ls_list; struct work_struct ls_work; }; struct fcloop_tport { struct nvmet_fc_target_port *targetport; struct nvme_fc_remote_port *remoteport; struct fcloop_nport *nport; struct fcloop_lport *lport; spinlock_t lock; struct list_head ls_list; struct work_struct ls_work; }; struct fcloop_nport { struct fcloop_rport *rport; struct fcloop_tport *tport; struct fcloop_lport *lport; struct list_head nport_list; struct kref ref; u64 node_name; u64 port_name; u32 port_role; u32 port_id; }; struct fcloop_lsreq { struct nvmefc_ls_req *lsreq; struct nvmefc_ls_rsp ls_rsp; int lsdir; /* H2T or T2H */ int status; struct list_head ls_list; /* fcloop_rport->ls_list */ }; struct fcloop_rscn { struct fcloop_tport *tport; struct work_struct work; }; enum { INI_IO_START = 0, INI_IO_ACTIVE = 1, INI_IO_ABORTED = 2, INI_IO_COMPLETED = 3, }; struct fcloop_fcpreq { struct fcloop_tport *tport; struct nvmefc_fcp_req *fcpreq; spinlock_t reqlock; u16 status; u32 inistate; bool active; bool aborted; struct kref ref; struct work_struct fcp_rcv_work; struct work_struct abort_rcv_work; struct work_struct tio_done_work; struct nvmefc_tgt_fcp_req tgt_fcp_req; }; struct fcloop_ini_fcpreq { struct nvmefc_fcp_req *fcpreq; struct fcloop_fcpreq *tfcp_req; spinlock_t inilock; }; static inline struct fcloop_lsreq * ls_rsp_to_lsreq(struct nvmefc_ls_rsp *lsrsp) { return container_of(lsrsp, struct fcloop_lsreq, ls_rsp); } static inline struct fcloop_fcpreq * tgt_fcp_req_to_fcpreq(struct nvmefc_tgt_fcp_req *tgt_fcpreq) { return container_of(tgt_fcpreq, struct fcloop_fcpreq, tgt_fcp_req); } static int fcloop_create_queue(struct nvme_fc_local_port *localport, unsigned int qidx, u16 qsize, void **handle) { *handle = localport; return 0; } static void fcloop_delete_queue(struct nvme_fc_local_port *localport, unsigned int idx, void *handle) { } static void fcloop_rport_lsrqst_work(struct work_struct *work) { struct fcloop_rport *rport = container_of(work, struct fcloop_rport, ls_work); struct fcloop_lsreq *tls_req; spin_lock(&rport->lock); for (;;) { tls_req = list_first_entry_or_null(&rport->ls_list, struct fcloop_lsreq, ls_list); if (!tls_req) break; list_del(&tls_req->ls_list); spin_unlock(&rport->lock); tls_req->lsreq->done(tls_req->lsreq, tls_req->status); /* * callee may free memory containing tls_req. * do not reference lsreq after this. 
*/ spin_lock(&rport->lock); } spin_unlock(&rport->lock); } static int fcloop_h2t_ls_req(struct nvme_fc_local_port *localport, struct nvme_fc_remote_port *remoteport, struct nvmefc_ls_req *lsreq) { struct fcloop_lsreq *tls_req = lsreq->private; struct fcloop_rport *rport = remoteport->private; int ret = 0; tls_req->lsreq = lsreq; INIT_LIST_HEAD(&tls_req->ls_list); if (!rport->targetport) { tls_req->status = -ECONNREFUSED; spin_lock(&rport->lock); list_add_tail(&rport->ls_list, &tls_req->ls_list); spin_unlock(&rport->lock); queue_work(nvmet_wq, &rport->ls_work); return ret; } tls_req->status = 0; ret = nvmet_fc_rcv_ls_req(rport->targetport, rport, &tls_req->ls_rsp, lsreq->rqstaddr, lsreq->rqstlen); return ret; } static int fcloop_h2t_xmt_ls_rsp(struct nvmet_fc_target_port *targetport, struct nvmefc_ls_rsp *lsrsp) { struct fcloop_lsreq *tls_req = ls_rsp_to_lsreq(lsrsp); struct nvmefc_ls_req *lsreq = tls_req->lsreq; struct fcloop_tport *tport = targetport->private; struct nvme_fc_remote_port *remoteport = tport->remoteport; struct fcloop_rport *rport; memcpy(lsreq->rspaddr, lsrsp->rspbuf, ((lsreq->rsplen < lsrsp->rsplen) ? lsreq->rsplen : lsrsp->rsplen)); lsrsp->done(lsrsp); if (remoteport) { rport = remoteport->private; spin_lock(&rport->lock); list_add_tail(&rport->ls_list, &tls_req->ls_list); spin_unlock(&rport->lock); queue_work(nvmet_wq, &rport->ls_work); } return 0; } static void fcloop_tport_lsrqst_work(struct work_struct *work) { struct fcloop_tport *tport = container_of(work, struct fcloop_tport, ls_work); struct fcloop_lsreq *tls_req; spin_lock(&tport->lock); for (;;) { tls_req = list_first_entry_or_null(&tport->ls_list, struct fcloop_lsreq, ls_list); if (!tls_req) break; list_del(&tls_req->ls_list); spin_unlock(&tport->lock); tls_req->lsreq->done(tls_req->lsreq, tls_req->status); /* * callee may free memory containing tls_req. * do not reference lsreq after this. */ spin_lock(&tport->lock); } spin_unlock(&tport->lock); } static int fcloop_t2h_ls_req(struct nvmet_fc_target_port *targetport, void *hosthandle, struct nvmefc_ls_req *lsreq) { struct fcloop_lsreq *tls_req = lsreq->private; struct fcloop_tport *tport = targetport->private; int ret = 0; /* * hosthandle should be the dst.rport value. * hosthandle ignored as fcloop currently is * 1:1 tgtport vs remoteport */ tls_req->lsreq = lsreq; INIT_LIST_HEAD(&tls_req->ls_list); if (!tport->remoteport) { tls_req->status = -ECONNREFUSED; spin_lock(&tport->lock); list_add_tail(&tport->ls_list, &tls_req->ls_list); spin_unlock(&tport->lock); queue_work(nvmet_wq, &tport->ls_work); return ret; } tls_req->status = 0; ret = nvme_fc_rcv_ls_req(tport->remoteport, &tls_req->ls_rsp, lsreq->rqstaddr, lsreq->rqstlen); return ret; } static int fcloop_t2h_xmt_ls_rsp(struct nvme_fc_local_port *localport, struct nvme_fc_remote_port *remoteport, struct nvmefc_ls_rsp *lsrsp) { struct fcloop_lsreq *tls_req = ls_rsp_to_lsreq(lsrsp); struct nvmefc_ls_req *lsreq = tls_req->lsreq; struct fcloop_rport *rport = remoteport->private; struct nvmet_fc_target_port *targetport = rport->targetport; struct fcloop_tport *tport; memcpy(lsreq->rspaddr, lsrsp->rspbuf, ((lsreq->rsplen < lsrsp->rsplen) ? 
lsreq->rsplen : lsrsp->rsplen)); lsrsp->done(lsrsp); if (targetport) { tport = targetport->private; spin_lock(&tport->lock); list_add_tail(&tport->ls_list, &tls_req->ls_list); spin_unlock(&tport->lock); queue_work(nvmet_wq, &tport->ls_work); } return 0; } static void fcloop_t2h_host_release(void *hosthandle) { /* host handle ignored for now */ } /* * Simulate reception of RSCN and converting it to a initiator transport * call to rescan a remote port. */ static void fcloop_tgt_rscn_work(struct work_struct *work) { struct fcloop_rscn *tgt_rscn = container_of(work, struct fcloop_rscn, work); struct fcloop_tport *tport = tgt_rscn->tport; if (tport->remoteport) nvme_fc_rescan_remoteport(tport->remoteport); kfree(tgt_rscn); } static void fcloop_tgt_discovery_evt(struct nvmet_fc_target_port *tgtport) { struct fcloop_rscn *tgt_rscn; tgt_rscn = kzalloc(sizeof(*tgt_rscn), GFP_KERNEL); if (!tgt_rscn) return; tgt_rscn->tport = tgtport->private; INIT_WORK(&tgt_rscn->work, fcloop_tgt_rscn_work); queue_work(nvmet_wq, &tgt_rscn->work); } static void fcloop_tfcp_req_free(struct kref *ref) { struct fcloop_fcpreq *tfcp_req = container_of(ref, struct fcloop_fcpreq, ref); kfree(tfcp_req); } static void fcloop_tfcp_req_put(struct fcloop_fcpreq *tfcp_req) { kref_put(&tfcp_req->ref, fcloop_tfcp_req_free); } static int fcloop_tfcp_req_get(struct fcloop_fcpreq *tfcp_req) { return kref_get_unless_zero(&tfcp_req->ref); } static void fcloop_call_host_done(struct nvmefc_fcp_req *fcpreq, struct fcloop_fcpreq *tfcp_req, int status) { struct fcloop_ini_fcpreq *inireq = NULL; if (fcpreq) { inireq = fcpreq->private; spin_lock(&inireq->inilock); inireq->tfcp_req = NULL; spin_unlock(&inireq->inilock); fcpreq->status = status; fcpreq->done(fcpreq); } /* release original io reference on tgt struct */ fcloop_tfcp_req_put(tfcp_req); } static bool drop_fabric_opcode; #define DROP_OPCODE_MASK 0x00FF /* fabrics opcode will have a bit set above 1st byte */ static int drop_opcode = -1; static int drop_instance; static int drop_amount; static int drop_current_cnt; /* * Routine to parse io and determine if the io is to be dropped. * Returns: * 0 if io is not obstructed * 1 if io was dropped */ static int check_for_drop(struct fcloop_fcpreq *tfcp_req) { struct nvmefc_fcp_req *fcpreq = tfcp_req->fcpreq; struct nvme_fc_cmd_iu *cmdiu = fcpreq->cmdaddr; struct nvme_command *sqe = &cmdiu->sqe; if (drop_opcode == -1) return 0; pr_info("%s: seq opcd x%02x fctype x%02x: drop F %s op x%02x " "inst %d start %d amt %d\n", __func__, sqe->common.opcode, sqe->fabrics.fctype, drop_fabric_opcode ? 
"y" : "n", drop_opcode, drop_current_cnt, drop_instance, drop_amount); if ((drop_fabric_opcode && (sqe->common.opcode != nvme_fabrics_command || sqe->fabrics.fctype != drop_opcode)) || (!drop_fabric_opcode && sqe->common.opcode != drop_opcode)) return 0; if (++drop_current_cnt >= drop_instance) { if (drop_current_cnt >= drop_instance + drop_amount) drop_opcode = -1; return 1; } return 0; } static void fcloop_fcp_recv_work(struct work_struct *work) { struct fcloop_fcpreq *tfcp_req = container_of(work, struct fcloop_fcpreq, fcp_rcv_work); struct nvmefc_fcp_req *fcpreq = tfcp_req->fcpreq; unsigned long flags; int ret = 0; bool aborted = false; spin_lock_irqsave(&tfcp_req->reqlock, flags); switch (tfcp_req->inistate) { case INI_IO_START: tfcp_req->inistate = INI_IO_ACTIVE; break; case INI_IO_ABORTED: aborted = true; break; default: spin_unlock_irqrestore(&tfcp_req->reqlock, flags); WARN_ON(1); return; } spin_unlock_irqrestore(&tfcp_req->reqlock, flags); if (unlikely(aborted)) ret = -ECANCELED; else { if (likely(!check_for_drop(tfcp_req))) ret = nvmet_fc_rcv_fcp_req(tfcp_req->tport->targetport, &tfcp_req->tgt_fcp_req, fcpreq->cmdaddr, fcpreq->cmdlen); else pr_info("%s: dropped command ********\n", __func__); } if (ret) fcloop_call_host_done(fcpreq, tfcp_req, ret); } static void fcloop_fcp_abort_recv_work(struct work_struct *work) { struct fcloop_fcpreq *tfcp_req = container_of(work, struct fcloop_fcpreq, abort_rcv_work); struct nvmefc_fcp_req *fcpreq; bool completed = false; unsigned long flags; spin_lock_irqsave(&tfcp_req->reqlock, flags); fcpreq = tfcp_req->fcpreq; switch (tfcp_req->inistate) { case INI_IO_ABORTED: break; case INI_IO_COMPLETED: completed = true; break; default: spin_unlock_irqrestore(&tfcp_req->reqlock, flags); WARN_ON(1); return; } spin_unlock_irqrestore(&tfcp_req->reqlock, flags); if (unlikely(completed)) { /* remove reference taken in original abort downcall */ fcloop_tfcp_req_put(tfcp_req); return; } if (tfcp_req->tport->targetport) nvmet_fc_rcv_fcp_abort(tfcp_req->tport->targetport, &tfcp_req->tgt_fcp_req); spin_lock_irqsave(&tfcp_req->reqlock, flags); tfcp_req->fcpreq = NULL; spin_unlock_irqrestore(&tfcp_req->reqlock, flags); fcloop_call_host_done(fcpreq, tfcp_req, -ECANCELED); /* call_host_done releases reference for abort downcall */ } /* * FCP IO operation done by target completion. * call back up initiator "done" flows. 
*/ static void fcloop_tgt_fcprqst_done_work(struct work_struct *work) { struct fcloop_fcpreq *tfcp_req = container_of(work, struct fcloop_fcpreq, tio_done_work); struct nvmefc_fcp_req *fcpreq; unsigned long flags; spin_lock_irqsave(&tfcp_req->reqlock, flags); fcpreq = tfcp_req->fcpreq; tfcp_req->inistate = INI_IO_COMPLETED; spin_unlock_irqrestore(&tfcp_req->reqlock, flags); fcloop_call_host_done(fcpreq, tfcp_req, tfcp_req->status); } static int fcloop_fcp_req(struct nvme_fc_local_port *localport, struct nvme_fc_remote_port *remoteport, void *hw_queue_handle, struct nvmefc_fcp_req *fcpreq) { struct fcloop_rport *rport = remoteport->private; struct fcloop_ini_fcpreq *inireq = fcpreq->private; struct fcloop_fcpreq *tfcp_req; if (!rport->targetport) return -ECONNREFUSED; tfcp_req = kzalloc(sizeof(*tfcp_req), GFP_ATOMIC); if (!tfcp_req) return -ENOMEM; inireq->fcpreq = fcpreq; inireq->tfcp_req = tfcp_req; spin_lock_init(&inireq->inilock); tfcp_req->fcpreq = fcpreq; tfcp_req->tport = rport->targetport->private; tfcp_req->inistate = INI_IO_START; spin_lock_init(&tfcp_req->reqlock); INIT_WORK(&tfcp_req->fcp_rcv_work, fcloop_fcp_recv_work); INIT_WORK(&tfcp_req->abort_rcv_work, fcloop_fcp_abort_recv_work); INIT_WORK(&tfcp_req->tio_done_work, fcloop_tgt_fcprqst_done_work); kref_init(&tfcp_req->ref); queue_work(nvmet_wq, &tfcp_req->fcp_rcv_work); return 0; } static void fcloop_fcp_copy_data(u8 op, struct scatterlist *data_sg, struct scatterlist *io_sg, u32 offset, u32 length) { void *data_p, *io_p; u32 data_len, io_len, tlen; io_p = sg_virt(io_sg); io_len = io_sg->length; for ( ; offset; ) { tlen = min_t(u32, offset, io_len); offset -= tlen; io_len -= tlen; if (!io_len) { io_sg = sg_next(io_sg); io_p = sg_virt(io_sg); io_len = io_sg->length; } else io_p += tlen; } data_p = sg_virt(data_sg); data_len = data_sg->length; for ( ; length; ) { tlen = min_t(u32, io_len, data_len); tlen = min_t(u32, tlen, length); if (op == NVMET_FCOP_WRITEDATA) memcpy(data_p, io_p, tlen); else memcpy(io_p, data_p, tlen); length -= tlen; io_len -= tlen; if ((!io_len) && (length)) { io_sg = sg_next(io_sg); io_p = sg_virt(io_sg); io_len = io_sg->length; } else io_p += tlen; data_len -= tlen; if ((!data_len) && (length)) { data_sg = sg_next(data_sg); data_p = sg_virt(data_sg); data_len = data_sg->length; } else data_p += tlen; } } static int fcloop_fcp_op(struct nvmet_fc_target_port *tgtport, struct nvmefc_tgt_fcp_req *tgt_fcpreq) { struct fcloop_fcpreq *tfcp_req = tgt_fcp_req_to_fcpreq(tgt_fcpreq); struct nvmefc_fcp_req *fcpreq; u32 rsplen = 0, xfrlen = 0; int fcp_err = 0, active, aborted; u8 op = tgt_fcpreq->op; unsigned long flags; spin_lock_irqsave(&tfcp_req->reqlock, flags); fcpreq = tfcp_req->fcpreq; active = tfcp_req->active; aborted = tfcp_req->aborted; tfcp_req->active = true; spin_unlock_irqrestore(&tfcp_req->reqlock, flags); if (unlikely(active)) /* illegal - call while i/o active */ return -EALREADY; if (unlikely(aborted)) { /* target transport has aborted i/o prior */ spin_lock_irqsave(&tfcp_req->reqlock, flags); tfcp_req->active = false; spin_unlock_irqrestore(&tfcp_req->reqlock, flags); tgt_fcpreq->transferred_length = 0; tgt_fcpreq->fcp_error = -ECANCELED; tgt_fcpreq->done(tgt_fcpreq); return 0; } /* * if fcpreq is NULL, the I/O has been aborted (from * initiator side). For the target side, act as if all is well * but don't actually move data. 
*/ switch (op) { case NVMET_FCOP_WRITEDATA: xfrlen = tgt_fcpreq->transfer_length; if (fcpreq) { fcloop_fcp_copy_data(op, tgt_fcpreq->sg, fcpreq->first_sgl, tgt_fcpreq->offset, xfrlen); fcpreq->transferred_length += xfrlen; } break; case NVMET_FCOP_READDATA: case NVMET_FCOP_READDATA_RSP: xfrlen = tgt_fcpreq->transfer_length; if (fcpreq) { fcloop_fcp_copy_data(op, tgt_fcpreq->sg, fcpreq->first_sgl, tgt_fcpreq->offset, xfrlen); fcpreq->transferred_length += xfrlen; } if (op == NVMET_FCOP_READDATA) break; /* Fall-Thru to RSP handling */ fallthrough; case NVMET_FCOP_RSP: if (fcpreq) { rsplen = ((fcpreq->rsplen < tgt_fcpreq->rsplen) ? fcpreq->rsplen : tgt_fcpreq->rsplen); memcpy(fcpreq->rspaddr, tgt_fcpreq->rspaddr, rsplen); if (rsplen < tgt_fcpreq->rsplen) fcp_err = -E2BIG; fcpreq->rcv_rsplen = rsplen; fcpreq->status = 0; } tfcp_req->status = 0; break; default: fcp_err = -EINVAL; break; } spin_lock_irqsave(&tfcp_req->reqlock, flags); tfcp_req->active = false; spin_unlock_irqrestore(&tfcp_req->reqlock, flags); tgt_fcpreq->transferred_length = xfrlen; tgt_fcpreq->fcp_error = fcp_err; tgt_fcpreq->done(tgt_fcpreq); return 0; } static void fcloop_tgt_fcp_abort(struct nvmet_fc_target_port *tgtport, struct nvmefc_tgt_fcp_req *tgt_fcpreq) { struct fcloop_fcpreq *tfcp_req = tgt_fcp_req_to_fcpreq(tgt_fcpreq); unsigned long flags; /* * mark aborted only in case there were 2 threads in transport * (one doing io, other doing abort) and only kills ops posted * after the abort request */ spin_lock_irqsave(&tfcp_req->reqlock, flags); tfcp_req->aborted = true; spin_unlock_irqrestore(&tfcp_req->reqlock, flags); tfcp_req->status = NVME_SC_INTERNAL; /* * nothing more to do. If io wasn't active, the transport should * immediately call the req_release. If it was active, the op * will complete, and the lldd should call req_release. */ } static void fcloop_fcp_req_release(struct nvmet_fc_target_port *tgtport, struct nvmefc_tgt_fcp_req *tgt_fcpreq) { struct fcloop_fcpreq *tfcp_req = tgt_fcp_req_to_fcpreq(tgt_fcpreq); queue_work(nvmet_wq, &tfcp_req->tio_done_work); } static void fcloop_h2t_ls_abort(struct nvme_fc_local_port *localport, struct nvme_fc_remote_port *remoteport, struct nvmefc_ls_req *lsreq) { } static void fcloop_t2h_ls_abort(struct nvmet_fc_target_port *targetport, void *hosthandle, struct nvmefc_ls_req *lsreq) { } static void fcloop_fcp_abort(struct nvme_fc_local_port *localport, struct nvme_fc_remote_port *remoteport, void *hw_queue_handle, struct nvmefc_fcp_req *fcpreq) { struct fcloop_ini_fcpreq *inireq = fcpreq->private; struct fcloop_fcpreq *tfcp_req; bool abortio = true; unsigned long flags; spin_lock(&inireq->inilock); tfcp_req = inireq->tfcp_req; if (tfcp_req) fcloop_tfcp_req_get(tfcp_req); spin_unlock(&inireq->inilock); if (!tfcp_req) /* abort has already been called */ return; /* break initiator/target relationship for io */ spin_lock_irqsave(&tfcp_req->reqlock, flags); switch (tfcp_req->inistate) { case INI_IO_START: case INI_IO_ACTIVE: tfcp_req->inistate = INI_IO_ABORTED; break; case INI_IO_COMPLETED: abortio = false; break; default: spin_unlock_irqrestore(&tfcp_req->reqlock, flags); WARN_ON(1); return; } spin_unlock_irqrestore(&tfcp_req->reqlock, flags); if (abortio) /* leave the reference while the work item is scheduled */ WARN_ON(!queue_work(nvmet_wq, &tfcp_req->abort_rcv_work)); else { /* * as the io has already had the done callback made, * nothing more to do. 
So release the reference taken above */ fcloop_tfcp_req_put(tfcp_req); } } static void fcloop_nport_free(struct kref *ref) { struct fcloop_nport *nport = container_of(ref, struct fcloop_nport, ref); unsigned long flags; spin_lock_irqsave(&fcloop_lock, flags); list_del(&nport->nport_list); spin_unlock_irqrestore(&fcloop_lock, flags); kfree(nport); } static void fcloop_nport_put(struct fcloop_nport *nport) { kref_put(&nport->ref, fcloop_nport_free); } static int fcloop_nport_get(struct fcloop_nport *nport) { return kref_get_unless_zero(&nport->ref); } static void fcloop_localport_delete(struct nvme_fc_local_port *localport) { struct fcloop_lport_priv *lport_priv = localport->private; struct fcloop_lport *lport = lport_priv->lport; /* release any threads waiting for the unreg to complete */ complete(&lport->unreg_done); } static void fcloop_remoteport_delete(struct nvme_fc_remote_port *remoteport) { struct fcloop_rport *rport = remoteport->private; flush_work(&rport->ls_work); fcloop_nport_put(rport->nport); } static void fcloop_targetport_delete(struct nvmet_fc_target_port *targetport) { struct fcloop_tport *tport = targetport->private; flush_work(&tport->ls_work); fcloop_nport_put(tport->nport); } #define FCLOOP_HW_QUEUES 4 #define FCLOOP_SGL_SEGS 256 #define FCLOOP_DMABOUND_4G 0xFFFFFFFF static struct nvme_fc_port_template fctemplate = { .localport_delete = fcloop_localport_delete, .remoteport_delete = fcloop_remoteport_delete, .create_queue = fcloop_create_queue, .delete_queue = fcloop_delete_queue, .ls_req = fcloop_h2t_ls_req, .fcp_io = fcloop_fcp_req, .ls_abort = fcloop_h2t_ls_abort, .fcp_abort = fcloop_fcp_abort, .xmt_ls_rsp = fcloop_t2h_xmt_ls_rsp, .max_hw_queues = FCLOOP_HW_QUEUES, .max_sgl_segments = FCLOOP_SGL_SEGS, .max_dif_sgl_segments = FCLOOP_SGL_SEGS, .dma_boundary = FCLOOP_DMABOUND_4G, /* sizes of additional private data for data structures */ .local_priv_sz = sizeof(struct fcloop_lport_priv), .remote_priv_sz = sizeof(struct fcloop_rport), .lsrqst_priv_sz = sizeof(struct fcloop_lsreq), .fcprqst_priv_sz = sizeof(struct fcloop_ini_fcpreq), }; static struct nvmet_fc_target_template tgttemplate = { .targetport_delete = fcloop_targetport_delete, .xmt_ls_rsp = fcloop_h2t_xmt_ls_rsp, .fcp_op = fcloop_fcp_op, .fcp_abort = fcloop_tgt_fcp_abort, .fcp_req_release = fcloop_fcp_req_release, .discovery_event = fcloop_tgt_discovery_evt, .ls_req = fcloop_t2h_ls_req, .ls_abort = fcloop_t2h_ls_abort, .host_release = fcloop_t2h_host_release, .max_hw_queues = FCLOOP_HW_QUEUES, .max_sgl_segments = FCLOOP_SGL_SEGS, .max_dif_sgl_segments = FCLOOP_SGL_SEGS, .dma_boundary = FCLOOP_DMABOUND_4G, /* optional features */ .target_features = 0, /* sizes of additional private data for data structures */ .target_priv_sz = sizeof(struct fcloop_tport), .lsrqst_priv_sz = sizeof(struct fcloop_lsreq), }; static ssize_t fcloop_create_local_port(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct nvme_fc_port_info pinfo; struct fcloop_ctrl_options *opts; struct nvme_fc_local_port *localport; struct fcloop_lport *lport; struct fcloop_lport_priv *lport_priv; unsigned long flags; int ret = -ENOMEM; lport = kzalloc(sizeof(*lport), GFP_KERNEL); if (!lport) return -ENOMEM; opts = kzalloc(sizeof(*opts), GFP_KERNEL); if (!opts) goto out_free_lport; ret = fcloop_parse_options(opts, buf); if (ret) goto out_free_opts; /* everything there ? 
*/ if ((opts->mask & LPORT_OPTS) != LPORT_OPTS) { ret = -EINVAL; goto out_free_opts; } memset(&pinfo, 0, sizeof(pinfo)); pinfo.node_name = opts->wwnn; pinfo.port_name = opts->wwpn; pinfo.port_role = opts->roles; pinfo.port_id = opts->fcaddr; ret = nvme_fc_register_localport(&pinfo, &fctemplate, NULL, &localport); if (!ret) { /* success */ lport_priv = localport->private; lport_priv->lport = lport; lport->localport = localport; INIT_LIST_HEAD(&lport->lport_list); spin_lock_irqsave(&fcloop_lock, flags); list_add_tail(&lport->lport_list, &fcloop_lports); spin_unlock_irqrestore(&fcloop_lock, flags); } out_free_opts: kfree(opts); out_free_lport: /* free only if we're going to fail */ if (ret) kfree(lport); return ret ? ret : count; } static void __unlink_local_port(struct fcloop_lport *lport) { list_del(&lport->lport_list); } static int __wait_localport_unreg(struct fcloop_lport *lport) { int ret; init_completion(&lport->unreg_done); ret = nvme_fc_unregister_localport(lport->localport); if (!ret) wait_for_completion(&lport->unreg_done); kfree(lport); return ret; } static ssize_t fcloop_delete_local_port(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct fcloop_lport *tlport, *lport = NULL; u64 nodename, portname; unsigned long flags; int ret; ret = fcloop_parse_nm_options(dev, &nodename, &portname, buf); if (ret) return ret; spin_lock_irqsave(&fcloop_lock, flags); list_for_each_entry(tlport, &fcloop_lports, lport_list) { if (tlport->localport->node_name == nodename && tlport->localport->port_name == portname) { lport = tlport; __unlink_local_port(lport); break; } } spin_unlock_irqrestore(&fcloop_lock, flags); if (!lport) return -ENOENT; ret = __wait_localport_unreg(lport); return ret ? ret : count; } static struct fcloop_nport * fcloop_alloc_nport(const char *buf, size_t count, bool remoteport) { struct fcloop_nport *newnport, *nport = NULL; struct fcloop_lport *tmplport, *lport = NULL; struct fcloop_ctrl_options *opts; unsigned long flags; u32 opts_mask = (remoteport) ? RPORT_OPTS : TGTPORT_OPTS; int ret; opts = kzalloc(sizeof(*opts), GFP_KERNEL); if (!opts) return NULL; ret = fcloop_parse_options(opts, buf); if (ret) goto out_free_opts; /* everything there ? 
*/ if ((opts->mask & opts_mask) != opts_mask) { ret = -EINVAL; goto out_free_opts; } newnport = kzalloc(sizeof(*newnport), GFP_KERNEL); if (!newnport) goto out_free_opts; INIT_LIST_HEAD(&newnport->nport_list); newnport->node_name = opts->wwnn; newnport->port_name = opts->wwpn; if (opts->mask & NVMF_OPT_ROLES) newnport->port_role = opts->roles; if (opts->mask & NVMF_OPT_FCADDR) newnport->port_id = opts->fcaddr; kref_init(&newnport->ref); spin_lock_irqsave(&fcloop_lock, flags); list_for_each_entry(tmplport, &fcloop_lports, lport_list) { if (tmplport->localport->node_name == opts->wwnn && tmplport->localport->port_name == opts->wwpn) goto out_invalid_opts; if (tmplport->localport->node_name == opts->lpwwnn && tmplport->localport->port_name == opts->lpwwpn) lport = tmplport; } if (remoteport) { if (!lport) goto out_invalid_opts; newnport->lport = lport; } list_for_each_entry(nport, &fcloop_nports, nport_list) { if (nport->node_name == opts->wwnn && nport->port_name == opts->wwpn) { if ((remoteport && nport->rport) || (!remoteport && nport->tport)) { nport = NULL; goto out_invalid_opts; } fcloop_nport_get(nport); spin_unlock_irqrestore(&fcloop_lock, flags); if (remoteport) nport->lport = lport; if (opts->mask & NVMF_OPT_ROLES) nport->port_role = opts->roles; if (opts->mask & NVMF_OPT_FCADDR) nport->port_id = opts->fcaddr; goto out_free_newnport; } } list_add_tail(&newnport->nport_list, &fcloop_nports); spin_unlock_irqrestore(&fcloop_lock, flags); kfree(opts); return newnport; out_invalid_opts: spin_unlock_irqrestore(&fcloop_lock, flags); out_free_newnport: kfree(newnport); out_free_opts: kfree(opts); return nport; } static ssize_t fcloop_create_remote_port(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct nvme_fc_remote_port *remoteport; struct fcloop_nport *nport; struct fcloop_rport *rport; struct nvme_fc_port_info pinfo; int ret; nport = fcloop_alloc_nport(buf, count, true); if (!nport) return -EIO; memset(&pinfo, 0, sizeof(pinfo)); pinfo.node_name = nport->node_name; pinfo.port_name = nport->port_name; pinfo.port_role = nport->port_role; pinfo.port_id = nport->port_id; ret = nvme_fc_register_remoteport(nport->lport->localport, &pinfo, &remoteport); if (ret || !remoteport) { fcloop_nport_put(nport); return ret; } /* success */ rport = remoteport->private; rport->remoteport = remoteport; rport->targetport = (nport->tport) ? 
nport->tport->targetport : NULL; if (nport->tport) { nport->tport->remoteport = remoteport; nport->tport->lport = nport->lport; } rport->nport = nport; rport->lport = nport->lport; nport->rport = rport; spin_lock_init(&rport->lock); INIT_WORK(&rport->ls_work, fcloop_rport_lsrqst_work); INIT_LIST_HEAD(&rport->ls_list); return count; } static struct fcloop_rport * __unlink_remote_port(struct fcloop_nport *nport) { struct fcloop_rport *rport = nport->rport; if (rport && nport->tport) nport->tport->remoteport = NULL; nport->rport = NULL; return rport; } static int __remoteport_unreg(struct fcloop_nport *nport, struct fcloop_rport *rport) { if (!rport) return -EALREADY; return nvme_fc_unregister_remoteport(rport->remoteport); } static ssize_t fcloop_delete_remote_port(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct fcloop_nport *nport = NULL, *tmpport; static struct fcloop_rport *rport; u64 nodename, portname; unsigned long flags; int ret; ret = fcloop_parse_nm_options(dev, &nodename, &portname, buf); if (ret) return ret; spin_lock_irqsave(&fcloop_lock, flags); list_for_each_entry(tmpport, &fcloop_nports, nport_list) { if (tmpport->node_name == nodename && tmpport->port_name == portname && tmpport->rport) { nport = tmpport; rport = __unlink_remote_port(nport); break; } } spin_unlock_irqrestore(&fcloop_lock, flags); if (!nport) return -ENOENT; ret = __remoteport_unreg(nport, rport); return ret ? ret : count; } static ssize_t fcloop_create_target_port(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct nvmet_fc_target_port *targetport; struct fcloop_nport *nport; struct fcloop_tport *tport; struct nvmet_fc_port_info tinfo; int ret; nport = fcloop_alloc_nport(buf, count, false); if (!nport) return -EIO; tinfo.node_name = nport->node_name; tinfo.port_name = nport->port_name; tinfo.port_id = nport->port_id; ret = nvmet_fc_register_targetport(&tinfo, &tgttemplate, NULL, &targetport); if (ret) { fcloop_nport_put(nport); return ret; } /* success */ tport = targetport->private; tport->targetport = targetport; tport->remoteport = (nport->rport) ? 
nport->rport->remoteport : NULL; if (nport->rport) nport->rport->targetport = targetport; tport->nport = nport; tport->lport = nport->lport; nport->tport = tport; spin_lock_init(&tport->lock); INIT_WORK(&tport->ls_work, fcloop_tport_lsrqst_work); INIT_LIST_HEAD(&tport->ls_list); return count; } static struct fcloop_tport * __unlink_target_port(struct fcloop_nport *nport) { struct fcloop_tport *tport = nport->tport; if (tport && nport->rport) nport->rport->targetport = NULL; nport->tport = NULL; return tport; } static int __targetport_unreg(struct fcloop_nport *nport, struct fcloop_tport *tport) { if (!tport) return -EALREADY; return nvmet_fc_unregister_targetport(tport->targetport); } static ssize_t fcloop_delete_target_port(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct fcloop_nport *nport = NULL, *tmpport; struct fcloop_tport *tport = NULL; u64 nodename, portname; unsigned long flags; int ret; ret = fcloop_parse_nm_options(dev, &nodename, &portname, buf); if (ret) return ret; spin_lock_irqsave(&fcloop_lock, flags); list_for_each_entry(tmpport, &fcloop_nports, nport_list) { if (tmpport->node_name == nodename && tmpport->port_name == portname && tmpport->tport) { nport = tmpport; tport = __unlink_target_port(nport); break; } } spin_unlock_irqrestore(&fcloop_lock, flags); if (!nport) return -ENOENT; ret = __targetport_unreg(nport, tport); return ret ? ret : count; } static ssize_t fcloop_set_cmd_drop(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { unsigned int opcode; int starting, amount; if (sscanf(buf, "%x:%d:%d", &opcode, &starting, &amount) != 3) return -EBADRQC; drop_current_cnt = 0; drop_fabric_opcode = (opcode & ~DROP_OPCODE_MASK) ? true : false; drop_opcode = (opcode & DROP_OPCODE_MASK); drop_instance = starting; /* the check to drop routine uses instance + count to know when * to end. Thus, if dropping 1 instance, count should be 0. * so subtract 1 from the count. */ drop_amount = amount - 1; pr_info("%s: DROP: Starting at instance %d of%s opcode x%x drop +%d " "instances\n", __func__, drop_instance, drop_fabric_opcode ? 
" fabric" : "", drop_opcode, drop_amount); return count; } static DEVICE_ATTR(add_local_port, 0200, NULL, fcloop_create_local_port); static DEVICE_ATTR(del_local_port, 0200, NULL, fcloop_delete_local_port); static DEVICE_ATTR(add_remote_port, 0200, NULL, fcloop_create_remote_port); static DEVICE_ATTR(del_remote_port, 0200, NULL, fcloop_delete_remote_port); static DEVICE_ATTR(add_target_port, 0200, NULL, fcloop_create_target_port); static DEVICE_ATTR(del_target_port, 0200, NULL, fcloop_delete_target_port); static DEVICE_ATTR(set_cmd_drop, 0200, NULL, fcloop_set_cmd_drop); static struct attribute *fcloop_dev_attrs[] = { &dev_attr_add_local_port.attr, &dev_attr_del_local_port.attr, &dev_attr_add_remote_port.attr, &dev_attr_del_remote_port.attr, &dev_attr_add_target_port.attr, &dev_attr_del_target_port.attr, &dev_attr_set_cmd_drop.attr, NULL }; static const struct attribute_group fclopp_dev_attrs_group = { .attrs = fcloop_dev_attrs, }; static const struct attribute_group *fcloop_dev_attr_groups[] = { &fclopp_dev_attrs_group, NULL, }; static struct class *fcloop_class; static struct device *fcloop_device; static int __init fcloop_init(void) { int ret; fcloop_class = class_create("fcloop"); if (IS_ERR(fcloop_class)) { pr_err("couldn't register class fcloop\n"); ret = PTR_ERR(fcloop_class); return ret; } fcloop_device = device_create_with_groups( fcloop_class, NULL, MKDEV(0, 0), NULL, fcloop_dev_attr_groups, "ctl"); if (IS_ERR(fcloop_device)) { pr_err("couldn't create ctl device!\n"); ret = PTR_ERR(fcloop_device); goto out_destroy_class; } get_device(fcloop_device); return 0; out_destroy_class: class_destroy(fcloop_class); return ret; } static void __exit fcloop_exit(void) { struct fcloop_lport *lport = NULL; struct fcloop_nport *nport = NULL; struct fcloop_tport *tport; struct fcloop_rport *rport; unsigned long flags; int ret; spin_lock_irqsave(&fcloop_lock, flags); for (;;) { nport = list_first_entry_or_null(&fcloop_nports, typeof(*nport), nport_list); if (!nport) break; tport = __unlink_target_port(nport); rport = __unlink_remote_port(nport); spin_unlock_irqrestore(&fcloop_lock, flags); ret = __targetport_unreg(nport, tport); if (ret) pr_warn("%s: Failed deleting target port\n", __func__); ret = __remoteport_unreg(nport, rport); if (ret) pr_warn("%s: Failed deleting remote port\n", __func__); spin_lock_irqsave(&fcloop_lock, flags); } for (;;) { lport = list_first_entry_or_null(&fcloop_lports, typeof(*lport), lport_list); if (!lport) break; __unlink_local_port(lport); spin_unlock_irqrestore(&fcloop_lock, flags); ret = __wait_localport_unreg(lport); if (ret) pr_warn("%s: Failed deleting local port\n", __func__); spin_lock_irqsave(&fcloop_lock, flags); } spin_unlock_irqrestore(&fcloop_lock, flags); put_device(fcloop_device); device_destroy(fcloop_class, MKDEV(0, 0)); class_destroy(fcloop_class); } module_init(fcloop_init); module_exit(fcloop_exit); MODULE_LICENSE("GPL v2");
linux-master
drivers/nvme/target/fcloop.c
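The fcloop record above arms a command-drop window through the set_cmd_drop attribute: fcloop_set_cmd_drop() stores drop_instance = starting and drop_amount = amount - 1, and check_for_drop() drops every matching command whose running count falls in [drop_instance, drop_instance + drop_amount], disarming itself once the window is exhausted. Below is a minimal standalone sketch of just that counting logic, with a simplified set_cmd_drop() that skips the opcode matching the real code performs.

/* Minimal sketch of the fcloop drop-window arithmetic (illustrative only). */
#include <stdio.h>

static int drop_opcode = -1;	/* -1 means dropping is disarmed */
static int drop_instance;
static int drop_amount;
static int drop_current_cnt;

static void set_cmd_drop(int starting, int amount)
{
	drop_current_cnt = 0;
	drop_opcode = 0x02;		/* pretend every command matches */
	drop_instance = starting;
	drop_amount = amount - 1;	/* window length minus one, as in fcloop */
}

/* returns 1 if this matching command falls inside the drop window */
static int check_for_drop(void)
{
	if (drop_opcode == -1)
		return 0;

	if (++drop_current_cnt >= drop_instance) {
		if (drop_current_cnt >= drop_instance + drop_amount)
			drop_opcode = -1;	/* window exhausted, disarm */
		return 1;
	}
	return 0;
}

int main(void)
{
	int i;

	set_cmd_drop(3, 2);	/* drop 2 commands starting at the 3rd */
	for (i = 1; i <= 6; i++)
		printf("cmd %d: %s\n", i,
		       check_for_drop() ? "dropped" : "passed");
	return 0;
}

Running the sketch drops exactly commands 3 and 4 and passes the rest, matching the "if dropping 1 instance, count should be 0" note in fcloop_set_cmd_drop().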
// SPDX-License-Identifier: GPL-2.0 /* * NVMe over Fabrics DH-HMAC-CHAP authentication command handling. * Copyright (c) 2020 Hannes Reinecke, SUSE Software Solutions. * All rights reserved. */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/blkdev.h> #include <linux/random.h> #include <linux/nvme-auth.h> #include <crypto/hash.h> #include <crypto/kpp.h> #include "nvmet.h" static void nvmet_auth_expired_work(struct work_struct *work) { struct nvmet_sq *sq = container_of(to_delayed_work(work), struct nvmet_sq, auth_expired_work); pr_debug("%s: ctrl %d qid %d transaction %u expired, resetting\n", __func__, sq->ctrl->cntlid, sq->qid, sq->dhchap_tid); sq->dhchap_step = NVME_AUTH_DHCHAP_MESSAGE_NEGOTIATE; sq->dhchap_tid = -1; } void nvmet_auth_sq_init(struct nvmet_sq *sq) { /* Initialize in-band authentication */ INIT_DELAYED_WORK(&sq->auth_expired_work, nvmet_auth_expired_work); sq->authenticated = false; sq->dhchap_step = NVME_AUTH_DHCHAP_MESSAGE_NEGOTIATE; } static u16 nvmet_auth_negotiate(struct nvmet_req *req, void *d) { struct nvmet_ctrl *ctrl = req->sq->ctrl; struct nvmf_auth_dhchap_negotiate_data *data = d; int i, hash_id = 0, fallback_hash_id = 0, dhgid, fallback_dhgid; pr_debug("%s: ctrl %d qid %d: data sc_d %d napd %d authid %d halen %d dhlen %d\n", __func__, ctrl->cntlid, req->sq->qid, data->sc_c, data->napd, data->auth_protocol[0].dhchap.authid, data->auth_protocol[0].dhchap.halen, data->auth_protocol[0].dhchap.dhlen); req->sq->dhchap_tid = le16_to_cpu(data->t_id); if (data->sc_c) return NVME_AUTH_DHCHAP_FAILURE_CONCAT_MISMATCH; if (data->napd != 1) return NVME_AUTH_DHCHAP_FAILURE_HASH_UNUSABLE; if (data->auth_protocol[0].dhchap.authid != NVME_AUTH_DHCHAP_AUTH_ID) return NVME_AUTH_DHCHAP_FAILURE_INCORRECT_PAYLOAD; for (i = 0; i < data->auth_protocol[0].dhchap.halen; i++) { u8 host_hmac_id = data->auth_protocol[0].dhchap.idlist[i]; if (!fallback_hash_id && crypto_has_shash(nvme_auth_hmac_name(host_hmac_id), 0, 0)) fallback_hash_id = host_hmac_id; if (ctrl->shash_id != host_hmac_id) continue; hash_id = ctrl->shash_id; break; } if (hash_id == 0) { if (fallback_hash_id == 0) { pr_debug("%s: ctrl %d qid %d: no usable hash found\n", __func__, ctrl->cntlid, req->sq->qid); return NVME_AUTH_DHCHAP_FAILURE_HASH_UNUSABLE; } pr_debug("%s: ctrl %d qid %d: no usable hash found, falling back to %s\n", __func__, ctrl->cntlid, req->sq->qid, nvme_auth_hmac_name(fallback_hash_id)); ctrl->shash_id = fallback_hash_id; } dhgid = -1; fallback_dhgid = -1; for (i = 0; i < data->auth_protocol[0].dhchap.dhlen; i++) { int tmp_dhgid = data->auth_protocol[0].dhchap.idlist[i + 30]; if (tmp_dhgid != ctrl->dh_gid) { dhgid = tmp_dhgid; break; } if (fallback_dhgid < 0) { const char *kpp = nvme_auth_dhgroup_kpp(tmp_dhgid); if (crypto_has_kpp(kpp, 0, 0)) fallback_dhgid = tmp_dhgid; } } if (dhgid < 0) { if (fallback_dhgid < 0) { pr_debug("%s: ctrl %d qid %d: no usable DH group found\n", __func__, ctrl->cntlid, req->sq->qid); return NVME_AUTH_DHCHAP_FAILURE_DHGROUP_UNUSABLE; } pr_debug("%s: ctrl %d qid %d: configured DH group %s not found\n", __func__, ctrl->cntlid, req->sq->qid, nvme_auth_dhgroup_name(fallback_dhgid)); ctrl->dh_gid = fallback_dhgid; } pr_debug("%s: ctrl %d qid %d: selected DH group %s (%d)\n", __func__, ctrl->cntlid, req->sq->qid, nvme_auth_dhgroup_name(ctrl->dh_gid), ctrl->dh_gid); return 0; } static u16 nvmet_auth_reply(struct nvmet_req *req, void *d) { struct nvmet_ctrl *ctrl = req->sq->ctrl; struct nvmf_auth_dhchap_reply_data *data = d; u16 dhvlen = le16_to_cpu(data->dhvlen); u8 
*response; pr_debug("%s: ctrl %d qid %d: data hl %d cvalid %d dhvlen %u\n", __func__, ctrl->cntlid, req->sq->qid, data->hl, data->cvalid, dhvlen); if (dhvlen) { if (!ctrl->dh_tfm) return NVME_AUTH_DHCHAP_FAILURE_INCORRECT_PAYLOAD; if (nvmet_auth_ctrl_sesskey(req, data->rval + 2 * data->hl, dhvlen) < 0) return NVME_AUTH_DHCHAP_FAILURE_DHGROUP_UNUSABLE; } response = kmalloc(data->hl, GFP_KERNEL); if (!response) return NVME_AUTH_DHCHAP_FAILURE_FAILED; if (!ctrl->host_key) { pr_warn("ctrl %d qid %d no host key\n", ctrl->cntlid, req->sq->qid); kfree(response); return NVME_AUTH_DHCHAP_FAILURE_FAILED; } if (nvmet_auth_host_hash(req, response, data->hl) < 0) { pr_debug("ctrl %d qid %d host hash failed\n", ctrl->cntlid, req->sq->qid); kfree(response); return NVME_AUTH_DHCHAP_FAILURE_FAILED; } if (memcmp(data->rval, response, data->hl)) { pr_info("ctrl %d qid %d host response mismatch\n", ctrl->cntlid, req->sq->qid); kfree(response); return NVME_AUTH_DHCHAP_FAILURE_FAILED; } kfree(response); pr_debug("%s: ctrl %d qid %d host authenticated\n", __func__, ctrl->cntlid, req->sq->qid); if (data->cvalid) { req->sq->dhchap_c2 = kmemdup(data->rval + data->hl, data->hl, GFP_KERNEL); if (!req->sq->dhchap_c2) return NVME_AUTH_DHCHAP_FAILURE_FAILED; pr_debug("%s: ctrl %d qid %d challenge %*ph\n", __func__, ctrl->cntlid, req->sq->qid, data->hl, req->sq->dhchap_c2); req->sq->dhchap_s2 = le32_to_cpu(data->seqnum); } else { req->sq->authenticated = true; req->sq->dhchap_c2 = NULL; } return 0; } static u16 nvmet_auth_failure2(void *d) { struct nvmf_auth_dhchap_failure_data *data = d; return data->rescode_exp; } void nvmet_execute_auth_send(struct nvmet_req *req) { struct nvmet_ctrl *ctrl = req->sq->ctrl; struct nvmf_auth_dhchap_success2_data *data; void *d; u32 tl; u16 status = 0; if (req->cmd->auth_send.secp != NVME_AUTH_DHCHAP_PROTOCOL_IDENTIFIER) { status = NVME_SC_INVALID_FIELD | NVME_SC_DNR; req->error_loc = offsetof(struct nvmf_auth_send_command, secp); goto done; } if (req->cmd->auth_send.spsp0 != 0x01) { status = NVME_SC_INVALID_FIELD | NVME_SC_DNR; req->error_loc = offsetof(struct nvmf_auth_send_command, spsp0); goto done; } if (req->cmd->auth_send.spsp1 != 0x01) { status = NVME_SC_INVALID_FIELD | NVME_SC_DNR; req->error_loc = offsetof(struct nvmf_auth_send_command, spsp1); goto done; } tl = le32_to_cpu(req->cmd->auth_send.tl); if (!tl) { status = NVME_SC_INVALID_FIELD | NVME_SC_DNR; req->error_loc = offsetof(struct nvmf_auth_send_command, tl); goto done; } if (!nvmet_check_transfer_len(req, tl)) { pr_debug("%s: transfer length mismatch (%u)\n", __func__, tl); return; } d = kmalloc(tl, GFP_KERNEL); if (!d) { status = NVME_SC_INTERNAL; goto done; } status = nvmet_copy_from_sgl(req, 0, d, tl); if (status) goto done_kfree; data = d; pr_debug("%s: ctrl %d qid %d type %d id %d step %x\n", __func__, ctrl->cntlid, req->sq->qid, data->auth_type, data->auth_id, req->sq->dhchap_step); if (data->auth_type != NVME_AUTH_COMMON_MESSAGES && data->auth_type != NVME_AUTH_DHCHAP_MESSAGES) goto done_failure1; if (data->auth_type == NVME_AUTH_COMMON_MESSAGES) { if (data->auth_id == NVME_AUTH_DHCHAP_MESSAGE_NEGOTIATE) { /* Restart negotiation */ pr_debug("%s: ctrl %d qid %d reset negotiation\n", __func__, ctrl->cntlid, req->sq->qid); if (!req->sq->qid) { if (nvmet_setup_auth(ctrl) < 0) { status = NVME_SC_INTERNAL; pr_err("ctrl %d qid 0 failed to setup" "re-authentication", ctrl->cntlid); goto done_failure1; } } req->sq->dhchap_step = NVME_AUTH_DHCHAP_MESSAGE_NEGOTIATE; } else if (data->auth_id != req->sq->dhchap_step) goto 
done_failure1; /* Validate negotiation parameters */ status = nvmet_auth_negotiate(req, d); if (status == 0) req->sq->dhchap_step = NVME_AUTH_DHCHAP_MESSAGE_CHALLENGE; else { req->sq->dhchap_step = NVME_AUTH_DHCHAP_MESSAGE_FAILURE1; req->sq->dhchap_status = status; status = 0; } goto done_kfree; } if (data->auth_id != req->sq->dhchap_step) { pr_debug("%s: ctrl %d qid %d step mismatch (%d != %d)\n", __func__, ctrl->cntlid, req->sq->qid, data->auth_id, req->sq->dhchap_step); goto done_failure1; } if (le16_to_cpu(data->t_id) != req->sq->dhchap_tid) { pr_debug("%s: ctrl %d qid %d invalid transaction %d (expected %d)\n", __func__, ctrl->cntlid, req->sq->qid, le16_to_cpu(data->t_id), req->sq->dhchap_tid); req->sq->dhchap_step = NVME_AUTH_DHCHAP_MESSAGE_FAILURE1; req->sq->dhchap_status = NVME_AUTH_DHCHAP_FAILURE_INCORRECT_PAYLOAD; goto done_kfree; } switch (data->auth_id) { case NVME_AUTH_DHCHAP_MESSAGE_REPLY: status = nvmet_auth_reply(req, d); if (status == 0) req->sq->dhchap_step = NVME_AUTH_DHCHAP_MESSAGE_SUCCESS1; else { req->sq->dhchap_step = NVME_AUTH_DHCHAP_MESSAGE_FAILURE1; req->sq->dhchap_status = status; status = 0; } goto done_kfree; case NVME_AUTH_DHCHAP_MESSAGE_SUCCESS2: req->sq->authenticated = true; pr_debug("%s: ctrl %d qid %d ctrl authenticated\n", __func__, ctrl->cntlid, req->sq->qid); goto done_kfree; case NVME_AUTH_DHCHAP_MESSAGE_FAILURE2: status = nvmet_auth_failure2(d); if (status) { pr_warn("ctrl %d qid %d: authentication failed (%d)\n", ctrl->cntlid, req->sq->qid, status); req->sq->dhchap_status = status; req->sq->authenticated = false; status = 0; } goto done_kfree; default: req->sq->dhchap_status = NVME_AUTH_DHCHAP_FAILURE_INCORRECT_MESSAGE; req->sq->dhchap_step = NVME_AUTH_DHCHAP_MESSAGE_FAILURE2; req->sq->authenticated = false; goto done_kfree; } done_failure1: req->sq->dhchap_status = NVME_AUTH_DHCHAP_FAILURE_INCORRECT_MESSAGE; req->sq->dhchap_step = NVME_AUTH_DHCHAP_MESSAGE_FAILURE2; done_kfree: kfree(d); done: pr_debug("%s: ctrl %d qid %d dhchap status %x step %x\n", __func__, ctrl->cntlid, req->sq->qid, req->sq->dhchap_status, req->sq->dhchap_step); if (status) pr_debug("%s: ctrl %d qid %d nvme status %x error loc %d\n", __func__, ctrl->cntlid, req->sq->qid, status, req->error_loc); req->cqe->result.u64 = 0; nvmet_req_complete(req, status); if (req->sq->dhchap_step != NVME_AUTH_DHCHAP_MESSAGE_SUCCESS2 && req->sq->dhchap_step != NVME_AUTH_DHCHAP_MESSAGE_FAILURE2) { unsigned long auth_expire_secs = ctrl->kato ? 
ctrl->kato : 120; mod_delayed_work(system_wq, &req->sq->auth_expired_work, auth_expire_secs * HZ); return; } /* Final states, clear up variables */ nvmet_auth_sq_free(req->sq); if (req->sq->dhchap_step == NVME_AUTH_DHCHAP_MESSAGE_FAILURE2) nvmet_ctrl_fatal_error(ctrl); } static int nvmet_auth_challenge(struct nvmet_req *req, void *d, int al) { struct nvmf_auth_dhchap_challenge_data *data = d; struct nvmet_ctrl *ctrl = req->sq->ctrl; int ret = 0; int hash_len = nvme_auth_hmac_hash_len(ctrl->shash_id); int data_size = sizeof(*d) + hash_len; if (ctrl->dh_tfm) data_size += ctrl->dh_keysize; if (al < data_size) { pr_debug("%s: buffer too small (al %d need %d)\n", __func__, al, data_size); return -EINVAL; } memset(data, 0, data_size); req->sq->dhchap_s1 = nvme_auth_get_seqnum(); data->auth_type = NVME_AUTH_DHCHAP_MESSAGES; data->auth_id = NVME_AUTH_DHCHAP_MESSAGE_CHALLENGE; data->t_id = cpu_to_le16(req->sq->dhchap_tid); data->hashid = ctrl->shash_id; data->hl = hash_len; data->seqnum = cpu_to_le32(req->sq->dhchap_s1); req->sq->dhchap_c1 = kmalloc(data->hl, GFP_KERNEL); if (!req->sq->dhchap_c1) return -ENOMEM; get_random_bytes(req->sq->dhchap_c1, data->hl); memcpy(data->cval, req->sq->dhchap_c1, data->hl); if (ctrl->dh_tfm) { data->dhgid = ctrl->dh_gid; data->dhvlen = cpu_to_le16(ctrl->dh_keysize); ret = nvmet_auth_ctrl_exponential(req, data->cval + data->hl, ctrl->dh_keysize); } pr_debug("%s: ctrl %d qid %d seq %d transaction %d hl %d dhvlen %zu\n", __func__, ctrl->cntlid, req->sq->qid, req->sq->dhchap_s1, req->sq->dhchap_tid, data->hl, ctrl->dh_keysize); return ret; } static int nvmet_auth_success1(struct nvmet_req *req, void *d, int al) { struct nvmf_auth_dhchap_success1_data *data = d; struct nvmet_ctrl *ctrl = req->sq->ctrl; int hash_len = nvme_auth_hmac_hash_len(ctrl->shash_id); WARN_ON(al < sizeof(*data)); memset(data, 0, sizeof(*data)); data->auth_type = NVME_AUTH_DHCHAP_MESSAGES; data->auth_id = NVME_AUTH_DHCHAP_MESSAGE_SUCCESS1; data->t_id = cpu_to_le16(req->sq->dhchap_tid); data->hl = hash_len; if (req->sq->dhchap_c2) { if (!ctrl->ctrl_key) { pr_warn("ctrl %d qid %d no ctrl key\n", ctrl->cntlid, req->sq->qid); return NVME_AUTH_DHCHAP_FAILURE_FAILED; } if (nvmet_auth_ctrl_hash(req, data->rval, data->hl)) return NVME_AUTH_DHCHAP_FAILURE_HASH_UNUSABLE; data->rvalid = 1; pr_debug("ctrl %d qid %d response %*ph\n", ctrl->cntlid, req->sq->qid, data->hl, data->rval); } return 0; } static void nvmet_auth_failure1(struct nvmet_req *req, void *d, int al) { struct nvmf_auth_dhchap_failure_data *data = d; WARN_ON(al < sizeof(*data)); data->auth_type = NVME_AUTH_COMMON_MESSAGES; data->auth_id = NVME_AUTH_DHCHAP_MESSAGE_FAILURE1; data->t_id = cpu_to_le16(req->sq->dhchap_tid); data->rescode = NVME_AUTH_DHCHAP_FAILURE_REASON_FAILED; data->rescode_exp = req->sq->dhchap_status; } void nvmet_execute_auth_receive(struct nvmet_req *req) { struct nvmet_ctrl *ctrl = req->sq->ctrl; void *d; u32 al; u16 status = 0; if (req->cmd->auth_receive.secp != NVME_AUTH_DHCHAP_PROTOCOL_IDENTIFIER) { status = NVME_SC_INVALID_FIELD | NVME_SC_DNR; req->error_loc = offsetof(struct nvmf_auth_receive_command, secp); goto done; } if (req->cmd->auth_receive.spsp0 != 0x01) { status = NVME_SC_INVALID_FIELD | NVME_SC_DNR; req->error_loc = offsetof(struct nvmf_auth_receive_command, spsp0); goto done; } if (req->cmd->auth_receive.spsp1 != 0x01) { status = NVME_SC_INVALID_FIELD | NVME_SC_DNR; req->error_loc = offsetof(struct nvmf_auth_receive_command, spsp1); goto done; } al = le32_to_cpu(req->cmd->auth_receive.al); if (!al) { status = 
NVME_SC_INVALID_FIELD | NVME_SC_DNR; req->error_loc = offsetof(struct nvmf_auth_receive_command, al); goto done; } if (!nvmet_check_transfer_len(req, al)) { pr_debug("%s: transfer length mismatch (%u)\n", __func__, al); return; } d = kmalloc(al, GFP_KERNEL); if (!d) { status = NVME_SC_INTERNAL; goto done; } pr_debug("%s: ctrl %d qid %d step %x\n", __func__, ctrl->cntlid, req->sq->qid, req->sq->dhchap_step); switch (req->sq->dhchap_step) { case NVME_AUTH_DHCHAP_MESSAGE_CHALLENGE: if (nvmet_auth_challenge(req, d, al) < 0) { pr_warn("ctrl %d qid %d: challenge error (%d)\n", ctrl->cntlid, req->sq->qid, status); status = NVME_SC_INTERNAL; break; } req->sq->dhchap_step = NVME_AUTH_DHCHAP_MESSAGE_REPLY; break; case NVME_AUTH_DHCHAP_MESSAGE_SUCCESS1: status = nvmet_auth_success1(req, d, al); if (status) { req->sq->dhchap_status = status; req->sq->authenticated = false; nvmet_auth_failure1(req, d, al); pr_warn("ctrl %d qid %d: success1 status (%x)\n", ctrl->cntlid, req->sq->qid, req->sq->dhchap_status); break; } req->sq->dhchap_step = NVME_AUTH_DHCHAP_MESSAGE_SUCCESS2; break; case NVME_AUTH_DHCHAP_MESSAGE_FAILURE1: req->sq->authenticated = false; nvmet_auth_failure1(req, d, al); pr_warn("ctrl %d qid %d failure1 (%x)\n", ctrl->cntlid, req->sq->qid, req->sq->dhchap_status); break; default: pr_warn("ctrl %d qid %d unhandled step (%d)\n", ctrl->cntlid, req->sq->qid, req->sq->dhchap_step); req->sq->dhchap_step = NVME_AUTH_DHCHAP_MESSAGE_FAILURE1; req->sq->dhchap_status = NVME_AUTH_DHCHAP_FAILURE_FAILED; nvmet_auth_failure1(req, d, al); status = 0; break; } status = nvmet_copy_to_sgl(req, 0, d, al); kfree(d); done: req->cqe->result.u64 = 0; nvmet_req_complete(req, status); if (req->sq->dhchap_step == NVME_AUTH_DHCHAP_MESSAGE_SUCCESS2) nvmet_auth_sq_free(req->sq); else if (req->sq->dhchap_step == NVME_AUTH_DHCHAP_MESSAGE_FAILURE1) { nvmet_auth_sq_free(req->sq); nvmet_ctrl_fatal_error(ctrl); } }
linux-master
drivers/nvme/target/fabrics-cmd-auth.c
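The fabrics-cmd-auth.c record above drives DH-HMAC-CHAP as a per-queue step machine: sq->dhchap_step records which message is expected next, nvmet_execute_auth_send() rejects an auth_id that does not match the current step, and the happy path runs negotiate, challenge, reply, success1 and, when the host also challenged the controller, success2. Below is a minimal sketch of that expected-step check, assuming simplified enum values that are illustrative only and not the NVMe protocol encodings.

/* Minimal sketch of an expected-step check; step values are made up. */
#include <stdio.h>

enum dhchap_step {
	STEP_NEGOTIATE,		/* host -> ctrl (AUTH_Send) */
	STEP_CHALLENGE,		/* ctrl -> host (AUTH_Receive) */
	STEP_REPLY,		/* host -> ctrl */
	STEP_SUCCESS1,		/* ctrl -> host */
	STEP_SUCCESS2,		/* host -> ctrl, bidirectional case only */
	STEP_DONE,
};

static const char *step_name[] = {
	"negotiate", "challenge", "reply", "success1", "success2", "done",
};

/* advance only if the incoming message matches the expected step */
static int advance(enum dhchap_step *step, enum dhchap_step incoming)
{
	if (incoming != *step) {
		printf("step mismatch: got %s, expected %s\n",
		       step_name[incoming], step_name[*step]);
		return -1;
	}
	*step = incoming + 1;
	return 0;
}

int main(void)
{
	enum dhchap_step step = STEP_NEGOTIATE;

	advance(&step, STEP_NEGOTIATE);
	advance(&step, STEP_CHALLENGE);
	advance(&step, STEP_REPLY);
	advance(&step, STEP_SUCCESS1);
	advance(&step, STEP_SUCCESS2);
	printf("final step: %s\n", step_name[step]);

	advance(&step, STEP_REPLY);	/* out-of-order message is rejected */
	return 0;
}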
// SPDX-License-Identifier: GPL-2.0 /* * Copyright (c) 2016 Avago Technologies. All rights reserved. */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/module.h> #include <linux/slab.h> #include <linux/blk-mq.h> #include <linux/parser.h> #include <linux/random.h> #include <uapi/scsi/fc/fc_fs.h> #include <uapi/scsi/fc/fc_els.h> #include "nvmet.h" #include <linux/nvme-fc-driver.h> #include <linux/nvme-fc.h> #include "../host/fc.h" /* *************************** Data Structures/Defines ****************** */ #define NVMET_LS_CTX_COUNT 256 struct nvmet_fc_tgtport; struct nvmet_fc_tgt_assoc; struct nvmet_fc_ls_iod { /* for an LS RQST RCV */ struct nvmefc_ls_rsp *lsrsp; struct nvmefc_tgt_fcp_req *fcpreq; /* only if RS */ struct list_head ls_rcv_list; /* tgtport->ls_rcv_list */ struct nvmet_fc_tgtport *tgtport; struct nvmet_fc_tgt_assoc *assoc; void *hosthandle; union nvmefc_ls_requests *rqstbuf; union nvmefc_ls_responses *rspbuf; u16 rqstdatalen; dma_addr_t rspdma; struct scatterlist sg[2]; struct work_struct work; } __aligned(sizeof(unsigned long long)); struct nvmet_fc_ls_req_op { /* for an LS RQST XMT */ struct nvmefc_ls_req ls_req; struct nvmet_fc_tgtport *tgtport; void *hosthandle; int ls_error; struct list_head lsreq_list; /* tgtport->ls_req_list */ bool req_queued; }; /* desired maximum for a single sequence - if sg list allows it */ #define NVMET_FC_MAX_SEQ_LENGTH (256 * 1024) enum nvmet_fcp_datadir { NVMET_FCP_NODATA, NVMET_FCP_WRITE, NVMET_FCP_READ, NVMET_FCP_ABORTED, }; struct nvmet_fc_fcp_iod { struct nvmefc_tgt_fcp_req *fcpreq; struct nvme_fc_cmd_iu cmdiubuf; struct nvme_fc_ersp_iu rspiubuf; dma_addr_t rspdma; struct scatterlist *next_sg; struct scatterlist *data_sg; int data_sg_cnt; u32 offset; enum nvmet_fcp_datadir io_dir; bool active; bool abort; bool aborted; bool writedataactive; spinlock_t flock; struct nvmet_req req; struct work_struct defer_work; struct nvmet_fc_tgtport *tgtport; struct nvmet_fc_tgt_queue *queue; struct list_head fcp_list; /* tgtport->fcp_list */ }; struct nvmet_fc_tgtport { struct nvmet_fc_target_port fc_target_port; struct list_head tgt_list; /* nvmet_fc_target_list */ struct device *dev; /* dev for dma mapping */ struct nvmet_fc_target_template *ops; struct nvmet_fc_ls_iod *iod; spinlock_t lock; struct list_head ls_rcv_list; struct list_head ls_req_list; struct list_head ls_busylist; struct list_head assoc_list; struct list_head host_list; struct ida assoc_cnt; struct nvmet_fc_port_entry *pe; struct kref ref; u32 max_sg_cnt; }; struct nvmet_fc_port_entry { struct nvmet_fc_tgtport *tgtport; struct nvmet_port *port; u64 node_name; u64 port_name; struct list_head pe_list; }; struct nvmet_fc_defer_fcp_req { struct list_head req_list; struct nvmefc_tgt_fcp_req *fcp_req; }; struct nvmet_fc_tgt_queue { bool ninetypercent; u16 qid; u16 sqsize; u16 ersp_ratio; __le16 sqhd; atomic_t connected; atomic_t sqtail; atomic_t zrspcnt; atomic_t rsn; spinlock_t qlock; struct nvmet_cq nvme_cq; struct nvmet_sq nvme_sq; struct nvmet_fc_tgt_assoc *assoc; struct list_head fod_list; struct list_head pending_cmd_list; struct list_head avail_defer_list; struct workqueue_struct *work_q; struct kref ref; struct rcu_head rcu; struct nvmet_fc_fcp_iod fod[]; /* array of fcp_iods */ } __aligned(sizeof(unsigned long long)); struct nvmet_fc_hostport { struct nvmet_fc_tgtport *tgtport; void *hosthandle; struct list_head host_list; struct kref ref; u8 invalid; }; struct nvmet_fc_tgt_assoc { u64 association_id; u32 a_id; atomic_t terminating; struct nvmet_fc_tgtport *tgtport; 
	struct nvmet_fc_hostport	*hostport;
	struct nvmet_fc_ls_iod		*rcv_disconn;
	struct list_head		a_list;
	struct nvmet_fc_tgt_queue __rcu	*queues[NVMET_NR_QUEUES + 1];
	struct kref			ref;
	struct work_struct		del_work;
	struct rcu_head			rcu;
};


static inline int
nvmet_fc_iodnum(struct nvmet_fc_ls_iod *iodptr)
{
	return (iodptr - iodptr->tgtport->iod);
}

static inline int
nvmet_fc_fodnum(struct nvmet_fc_fcp_iod *fodptr)
{
	return (fodptr - fodptr->queue->fod);
}


/*
 * Association and Connection IDs:
 *
 * Association ID will have random number in upper 6 bytes and zero
 * in lower 2 bytes
 *
 * Connection IDs will be Association ID with QID or'd in lower 2 bytes
 *
 * note: Association ID = Connection ID for queue 0
 */
#define BYTES_FOR_QID		sizeof(u16)
#define BYTES_FOR_QID_SHIFT	(BYTES_FOR_QID * 8)
#define NVMET_FC_QUEUEID_MASK	((u64)((1 << BYTES_FOR_QID_SHIFT) - 1))

static inline u64
nvmet_fc_makeconnid(struct nvmet_fc_tgt_assoc *assoc, u16 qid)
{
	return (assoc->association_id | qid);
}

static inline u64
nvmet_fc_getassociationid(u64 connectionid)
{
	return connectionid & ~NVMET_FC_QUEUEID_MASK;
}

static inline u16
nvmet_fc_getqueueid(u64 connectionid)
{
	return (u16)(connectionid & NVMET_FC_QUEUEID_MASK);
}

static inline struct nvmet_fc_tgtport *
targetport_to_tgtport(struct nvmet_fc_target_port *targetport)
{
	return container_of(targetport, struct nvmet_fc_tgtport,
			fc_target_port);
}

static inline struct nvmet_fc_fcp_iod *
nvmet_req_to_fod(struct nvmet_req *nvme_req)
{
	return container_of(nvme_req, struct nvmet_fc_fcp_iod, req);
}


/* *************************** Globals **************************** */


static DEFINE_SPINLOCK(nvmet_fc_tgtlock);

static LIST_HEAD(nvmet_fc_target_list);
static DEFINE_IDA(nvmet_fc_tgtport_cnt);
static LIST_HEAD(nvmet_fc_portentry_list);


static void nvmet_fc_handle_ls_rqst_work(struct work_struct *work);
static void nvmet_fc_fcp_rqst_op_defer_work(struct work_struct *work);
static void nvmet_fc_tgt_a_put(struct nvmet_fc_tgt_assoc *assoc);
static int nvmet_fc_tgt_a_get(struct nvmet_fc_tgt_assoc *assoc);
static void nvmet_fc_tgt_q_put(struct nvmet_fc_tgt_queue *queue);
static int nvmet_fc_tgt_q_get(struct nvmet_fc_tgt_queue *queue);
static void nvmet_fc_tgtport_put(struct nvmet_fc_tgtport *tgtport);
static int nvmet_fc_tgtport_get(struct nvmet_fc_tgtport *tgtport);
static void nvmet_fc_handle_fcp_rqst(struct nvmet_fc_tgtport *tgtport,
					struct nvmet_fc_fcp_iod *fod);
static void nvmet_fc_delete_target_assoc(struct nvmet_fc_tgt_assoc *assoc);
static void nvmet_fc_xmt_ls_rsp(struct nvmet_fc_tgtport *tgtport,
				struct nvmet_fc_ls_iod *iod);


/* *********************** FC-NVME DMA Handling **************************** */

/*
 * The fcloop device passes in a NULL device pointer. Real LLDDs will
 * pass in a valid device pointer. If NULL is passed to the dma mapping
 * routines, depending on the platform, it may or may not succeed, and
 * may crash.
 *
 * As such:
 * Wrap all the dma routines and check the dev pointer.
 *
 * For simple mappings (ones that return just a dma address), we'll noop
 * them, returning a dma address of 0.
 *
 * On more complex mappings (dma_map_sg), a pseudo routine fills
 * in the scatter list, setting all dma addresses to 0.
 */

static inline dma_addr_t
fc_dma_map_single(struct device *dev, void *ptr, size_t size,
		enum dma_data_direction dir)
{
	return dev ? dma_map_single(dev, ptr, size, dir) : (dma_addr_t)0L;
}

static inline int
fc_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	return dev ?
dma_mapping_error(dev, dma_addr) : 0; } static inline void fc_dma_unmap_single(struct device *dev, dma_addr_t addr, size_t size, enum dma_data_direction dir) { if (dev) dma_unmap_single(dev, addr, size, dir); } static inline void fc_dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr, size_t size, enum dma_data_direction dir) { if (dev) dma_sync_single_for_cpu(dev, addr, size, dir); } static inline void fc_dma_sync_single_for_device(struct device *dev, dma_addr_t addr, size_t size, enum dma_data_direction dir) { if (dev) dma_sync_single_for_device(dev, addr, size, dir); } /* pseudo dma_map_sg call */ static int fc_map_sg(struct scatterlist *sg, int nents) { struct scatterlist *s; int i; WARN_ON(nents == 0 || sg[0].length == 0); for_each_sg(sg, s, nents, i) { s->dma_address = 0L; #ifdef CONFIG_NEED_SG_DMA_LENGTH s->dma_length = s->length; #endif } return nents; } static inline int fc_dma_map_sg(struct device *dev, struct scatterlist *sg, int nents, enum dma_data_direction dir) { return dev ? dma_map_sg(dev, sg, nents, dir) : fc_map_sg(sg, nents); } static inline void fc_dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents, enum dma_data_direction dir) { if (dev) dma_unmap_sg(dev, sg, nents, dir); } /* ********************** FC-NVME LS XMT Handling ************************* */ static void __nvmet_fc_finish_ls_req(struct nvmet_fc_ls_req_op *lsop) { struct nvmet_fc_tgtport *tgtport = lsop->tgtport; struct nvmefc_ls_req *lsreq = &lsop->ls_req; unsigned long flags; spin_lock_irqsave(&tgtport->lock, flags); if (!lsop->req_queued) { spin_unlock_irqrestore(&tgtport->lock, flags); return; } list_del(&lsop->lsreq_list); lsop->req_queued = false; spin_unlock_irqrestore(&tgtport->lock, flags); fc_dma_unmap_single(tgtport->dev, lsreq->rqstdma, (lsreq->rqstlen + lsreq->rsplen), DMA_BIDIRECTIONAL); nvmet_fc_tgtport_put(tgtport); } static int __nvmet_fc_send_ls_req(struct nvmet_fc_tgtport *tgtport, struct nvmet_fc_ls_req_op *lsop, void (*done)(struct nvmefc_ls_req *req, int status)) { struct nvmefc_ls_req *lsreq = &lsop->ls_req; unsigned long flags; int ret = 0; if (!tgtport->ops->ls_req) return -EOPNOTSUPP; if (!nvmet_fc_tgtport_get(tgtport)) return -ESHUTDOWN; lsreq->done = done; lsop->req_queued = false; INIT_LIST_HEAD(&lsop->lsreq_list); lsreq->rqstdma = fc_dma_map_single(tgtport->dev, lsreq->rqstaddr, lsreq->rqstlen + lsreq->rsplen, DMA_BIDIRECTIONAL); if (fc_dma_mapping_error(tgtport->dev, lsreq->rqstdma)) { ret = -EFAULT; goto out_puttgtport; } lsreq->rspdma = lsreq->rqstdma + lsreq->rqstlen; spin_lock_irqsave(&tgtport->lock, flags); list_add_tail(&lsop->lsreq_list, &tgtport->ls_req_list); lsop->req_queued = true; spin_unlock_irqrestore(&tgtport->lock, flags); ret = tgtport->ops->ls_req(&tgtport->fc_target_port, lsop->hosthandle, lsreq); if (ret) goto out_unlink; return 0; out_unlink: lsop->ls_error = ret; spin_lock_irqsave(&tgtport->lock, flags); lsop->req_queued = false; list_del(&lsop->lsreq_list); spin_unlock_irqrestore(&tgtport->lock, flags); fc_dma_unmap_single(tgtport->dev, lsreq->rqstdma, (lsreq->rqstlen + lsreq->rsplen), DMA_BIDIRECTIONAL); out_puttgtport: nvmet_fc_tgtport_put(tgtport); return ret; } static int nvmet_fc_send_ls_req_async(struct nvmet_fc_tgtport *tgtport, struct nvmet_fc_ls_req_op *lsop, void (*done)(struct nvmefc_ls_req *req, int status)) { /* don't wait for completion */ return __nvmet_fc_send_ls_req(tgtport, lsop, done); } static void nvmet_fc_disconnect_assoc_done(struct nvmefc_ls_req *lsreq, int status) { struct nvmet_fc_ls_req_op *lsop = 
container_of(lsreq, struct nvmet_fc_ls_req_op, ls_req); __nvmet_fc_finish_ls_req(lsop); /* fc-nvme target doesn't care about success or failure of cmd */ kfree(lsop); } /* * This routine sends a FC-NVME LS to disconnect (aka terminate) * the FC-NVME Association. Terminating the association also * terminates the FC-NVME connections (per queue, both admin and io * queues) that are part of the association. E.g. things are torn * down, and the related FC-NVME Association ID and Connection IDs * become invalid. * * The behavior of the fc-nvme target is such that it's * understanding of the association and connections will implicitly * be torn down. The action is implicit as it may be due to a loss of * connectivity with the fc-nvme host, so the target may never get a * response even if it tried. As such, the action of this routine * is to asynchronously send the LS, ignore any results of the LS, and * continue on with terminating the association. If the fc-nvme host * is present and receives the LS, it too can tear down. */ static void nvmet_fc_xmt_disconnect_assoc(struct nvmet_fc_tgt_assoc *assoc) { struct nvmet_fc_tgtport *tgtport = assoc->tgtport; struct fcnvme_ls_disconnect_assoc_rqst *discon_rqst; struct fcnvme_ls_disconnect_assoc_acc *discon_acc; struct nvmet_fc_ls_req_op *lsop; struct nvmefc_ls_req *lsreq; int ret; /* * If ls_req is NULL or no hosthandle, it's an older lldd and no * message is normal. Otherwise, send unless the hostport has * already been invalidated by the lldd. */ if (!tgtport->ops->ls_req || !assoc->hostport || assoc->hostport->invalid) return; lsop = kzalloc((sizeof(*lsop) + sizeof(*discon_rqst) + sizeof(*discon_acc) + tgtport->ops->lsrqst_priv_sz), GFP_KERNEL); if (!lsop) { dev_info(tgtport->dev, "{%d:%d} send Disconnect Association failed: ENOMEM\n", tgtport->fc_target_port.port_num, assoc->a_id); return; } discon_rqst = (struct fcnvme_ls_disconnect_assoc_rqst *)&lsop[1]; discon_acc = (struct fcnvme_ls_disconnect_assoc_acc *)&discon_rqst[1]; lsreq = &lsop->ls_req; if (tgtport->ops->lsrqst_priv_sz) lsreq->private = (void *)&discon_acc[1]; else lsreq->private = NULL; lsop->tgtport = tgtport; lsop->hosthandle = assoc->hostport->hosthandle; nvmefc_fmt_lsreq_discon_assoc(lsreq, discon_rqst, discon_acc, assoc->association_id); ret = nvmet_fc_send_ls_req_async(tgtport, lsop, nvmet_fc_disconnect_assoc_done); if (ret) { dev_info(tgtport->dev, "{%d:%d} XMT Disconnect Association failed: %d\n", tgtport->fc_target_port.port_num, assoc->a_id, ret); kfree(lsop); } } /* *********************** FC-NVME Port Management ************************ */ static int nvmet_fc_alloc_ls_iodlist(struct nvmet_fc_tgtport *tgtport) { struct nvmet_fc_ls_iod *iod; int i; iod = kcalloc(NVMET_LS_CTX_COUNT, sizeof(struct nvmet_fc_ls_iod), GFP_KERNEL); if (!iod) return -ENOMEM; tgtport->iod = iod; for (i = 0; i < NVMET_LS_CTX_COUNT; iod++, i++) { INIT_WORK(&iod->work, nvmet_fc_handle_ls_rqst_work); iod->tgtport = tgtport; list_add_tail(&iod->ls_rcv_list, &tgtport->ls_rcv_list); iod->rqstbuf = kzalloc(sizeof(union nvmefc_ls_requests) + sizeof(union nvmefc_ls_responses), GFP_KERNEL); if (!iod->rqstbuf) goto out_fail; iod->rspbuf = (union nvmefc_ls_responses *)&iod->rqstbuf[1]; iod->rspdma = fc_dma_map_single(tgtport->dev, iod->rspbuf, sizeof(*iod->rspbuf), DMA_TO_DEVICE); if (fc_dma_mapping_error(tgtport->dev, iod->rspdma)) goto out_fail; } return 0; out_fail: kfree(iod->rqstbuf); list_del(&iod->ls_rcv_list); for (iod--, i--; i >= 0; iod--, i--) { fc_dma_unmap_single(tgtport->dev, iod->rspdma, 
sizeof(*iod->rspbuf), DMA_TO_DEVICE); kfree(iod->rqstbuf); list_del(&iod->ls_rcv_list); } kfree(iod); return -EFAULT; } static void nvmet_fc_free_ls_iodlist(struct nvmet_fc_tgtport *tgtport) { struct nvmet_fc_ls_iod *iod = tgtport->iod; int i; for (i = 0; i < NVMET_LS_CTX_COUNT; iod++, i++) { fc_dma_unmap_single(tgtport->dev, iod->rspdma, sizeof(*iod->rspbuf), DMA_TO_DEVICE); kfree(iod->rqstbuf); list_del(&iod->ls_rcv_list); } kfree(tgtport->iod); } static struct nvmet_fc_ls_iod * nvmet_fc_alloc_ls_iod(struct nvmet_fc_tgtport *tgtport) { struct nvmet_fc_ls_iod *iod; unsigned long flags; spin_lock_irqsave(&tgtport->lock, flags); iod = list_first_entry_or_null(&tgtport->ls_rcv_list, struct nvmet_fc_ls_iod, ls_rcv_list); if (iod) list_move_tail(&iod->ls_rcv_list, &tgtport->ls_busylist); spin_unlock_irqrestore(&tgtport->lock, flags); return iod; } static void nvmet_fc_free_ls_iod(struct nvmet_fc_tgtport *tgtport, struct nvmet_fc_ls_iod *iod) { unsigned long flags; spin_lock_irqsave(&tgtport->lock, flags); list_move(&iod->ls_rcv_list, &tgtport->ls_rcv_list); spin_unlock_irqrestore(&tgtport->lock, flags); } static void nvmet_fc_prep_fcp_iodlist(struct nvmet_fc_tgtport *tgtport, struct nvmet_fc_tgt_queue *queue) { struct nvmet_fc_fcp_iod *fod = queue->fod; int i; for (i = 0; i < queue->sqsize; fod++, i++) { INIT_WORK(&fod->defer_work, nvmet_fc_fcp_rqst_op_defer_work); fod->tgtport = tgtport; fod->queue = queue; fod->active = false; fod->abort = false; fod->aborted = false; fod->fcpreq = NULL; list_add_tail(&fod->fcp_list, &queue->fod_list); spin_lock_init(&fod->flock); fod->rspdma = fc_dma_map_single(tgtport->dev, &fod->rspiubuf, sizeof(fod->rspiubuf), DMA_TO_DEVICE); if (fc_dma_mapping_error(tgtport->dev, fod->rspdma)) { list_del(&fod->fcp_list); for (fod--, i--; i >= 0; fod--, i--) { fc_dma_unmap_single(tgtport->dev, fod->rspdma, sizeof(fod->rspiubuf), DMA_TO_DEVICE); fod->rspdma = 0L; list_del(&fod->fcp_list); } return; } } } static void nvmet_fc_destroy_fcp_iodlist(struct nvmet_fc_tgtport *tgtport, struct nvmet_fc_tgt_queue *queue) { struct nvmet_fc_fcp_iod *fod = queue->fod; int i; for (i = 0; i < queue->sqsize; fod++, i++) { if (fod->rspdma) fc_dma_unmap_single(tgtport->dev, fod->rspdma, sizeof(fod->rspiubuf), DMA_TO_DEVICE); } } static struct nvmet_fc_fcp_iod * nvmet_fc_alloc_fcp_iod(struct nvmet_fc_tgt_queue *queue) { struct nvmet_fc_fcp_iod *fod; lockdep_assert_held(&queue->qlock); fod = list_first_entry_or_null(&queue->fod_list, struct nvmet_fc_fcp_iod, fcp_list); if (fod) { list_del(&fod->fcp_list); fod->active = true; /* * no queue reference is taken, as it was taken by the * queue lookup just prior to the allocation. The iod * will "inherit" that reference. */ } return fod; } static void nvmet_fc_queue_fcp_req(struct nvmet_fc_tgtport *tgtport, struct nvmet_fc_tgt_queue *queue, struct nvmefc_tgt_fcp_req *fcpreq) { struct nvmet_fc_fcp_iod *fod = fcpreq->nvmet_fc_private; /* * put all admin cmds on hw queue id 0. All io commands go to * the respective hw queue based on a modulo basis */ fcpreq->hwqid = queue->qid ? 
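/*
 * Illustrative sketch, not part of the original driver: the hwqid
 * assignment in nvmet_fc_queue_fcp_req() pins admin commands (qid 0) to
 * hw queue 0 and spreads io queues across the LLDD's max_hw_queues by
 * modulo. The standalone userspace sketch below (hypothetical demo_*
 * names, wrapped in "#if 0" so it is never built as part of this file)
 * just evaluates that arithmetic for a few queue ids.
 */
#if 0
#include <stdio.h>

static unsigned int demo_hwqid(unsigned int qid, unsigned int max_hw_queues)
{
	/* admin queue always lands on hw queue 0 */
	return qid ? ((qid - 1) % max_hw_queues) : 0;
}

int main(void)
{
	unsigned int qid;

	/* with 4 hw queues: qid 0 -> 0, qids 1..4 -> 0..3, qid 5 wraps to 0 */
	for (qid = 0; qid <= 8; qid++)
		printf("qid %u -> hwqid %u\n", qid, demo_hwqid(qid, 4));
	return 0;
}
#endif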
((queue->qid - 1) % tgtport->ops->max_hw_queues) : 0; nvmet_fc_handle_fcp_rqst(tgtport, fod); } static void nvmet_fc_fcp_rqst_op_defer_work(struct work_struct *work) { struct nvmet_fc_fcp_iod *fod = container_of(work, struct nvmet_fc_fcp_iod, defer_work); /* Submit deferred IO for processing */ nvmet_fc_queue_fcp_req(fod->tgtport, fod->queue, fod->fcpreq); } static void nvmet_fc_free_fcp_iod(struct nvmet_fc_tgt_queue *queue, struct nvmet_fc_fcp_iod *fod) { struct nvmefc_tgt_fcp_req *fcpreq = fod->fcpreq; struct nvmet_fc_tgtport *tgtport = fod->tgtport; struct nvmet_fc_defer_fcp_req *deferfcp; unsigned long flags; fc_dma_sync_single_for_cpu(tgtport->dev, fod->rspdma, sizeof(fod->rspiubuf), DMA_TO_DEVICE); fcpreq->nvmet_fc_private = NULL; fod->active = false; fod->abort = false; fod->aborted = false; fod->writedataactive = false; fod->fcpreq = NULL; tgtport->ops->fcp_req_release(&tgtport->fc_target_port, fcpreq); /* release the queue lookup reference on the completed IO */ nvmet_fc_tgt_q_put(queue); spin_lock_irqsave(&queue->qlock, flags); deferfcp = list_first_entry_or_null(&queue->pending_cmd_list, struct nvmet_fc_defer_fcp_req, req_list); if (!deferfcp) { list_add_tail(&fod->fcp_list, &fod->queue->fod_list); spin_unlock_irqrestore(&queue->qlock, flags); return; } /* Re-use the fod for the next pending cmd that was deferred */ list_del(&deferfcp->req_list); fcpreq = deferfcp->fcp_req; /* deferfcp can be reused for another IO at a later date */ list_add_tail(&deferfcp->req_list, &queue->avail_defer_list); spin_unlock_irqrestore(&queue->qlock, flags); /* Save NVME CMD IO in fod */ memcpy(&fod->cmdiubuf, fcpreq->rspaddr, fcpreq->rsplen); /* Setup new fcpreq to be processed */ fcpreq->rspaddr = NULL; fcpreq->rsplen = 0; fcpreq->nvmet_fc_private = fod; fod->fcpreq = fcpreq; fod->active = true; /* inform LLDD IO is now being processed */ tgtport->ops->defer_rcv(&tgtport->fc_target_port, fcpreq); /* * Leave the queue lookup get reference taken when * fod was originally allocated. 
*/ queue_work(queue->work_q, &fod->defer_work); } static struct nvmet_fc_tgt_queue * nvmet_fc_alloc_target_queue(struct nvmet_fc_tgt_assoc *assoc, u16 qid, u16 sqsize) { struct nvmet_fc_tgt_queue *queue; int ret; if (qid > NVMET_NR_QUEUES) return NULL; queue = kzalloc(struct_size(queue, fod, sqsize), GFP_KERNEL); if (!queue) return NULL; if (!nvmet_fc_tgt_a_get(assoc)) goto out_free_queue; queue->work_q = alloc_workqueue("ntfc%d.%d.%d", 0, 0, assoc->tgtport->fc_target_port.port_num, assoc->a_id, qid); if (!queue->work_q) goto out_a_put; queue->qid = qid; queue->sqsize = sqsize; queue->assoc = assoc; INIT_LIST_HEAD(&queue->fod_list); INIT_LIST_HEAD(&queue->avail_defer_list); INIT_LIST_HEAD(&queue->pending_cmd_list); atomic_set(&queue->connected, 0); atomic_set(&queue->sqtail, 0); atomic_set(&queue->rsn, 1); atomic_set(&queue->zrspcnt, 0); spin_lock_init(&queue->qlock); kref_init(&queue->ref); nvmet_fc_prep_fcp_iodlist(assoc->tgtport, queue); ret = nvmet_sq_init(&queue->nvme_sq); if (ret) goto out_fail_iodlist; WARN_ON(assoc->queues[qid]); rcu_assign_pointer(assoc->queues[qid], queue); return queue; out_fail_iodlist: nvmet_fc_destroy_fcp_iodlist(assoc->tgtport, queue); destroy_workqueue(queue->work_q); out_a_put: nvmet_fc_tgt_a_put(assoc); out_free_queue: kfree(queue); return NULL; } static void nvmet_fc_tgt_queue_free(struct kref *ref) { struct nvmet_fc_tgt_queue *queue = container_of(ref, struct nvmet_fc_tgt_queue, ref); rcu_assign_pointer(queue->assoc->queues[queue->qid], NULL); nvmet_fc_destroy_fcp_iodlist(queue->assoc->tgtport, queue); nvmet_fc_tgt_a_put(queue->assoc); destroy_workqueue(queue->work_q); kfree_rcu(queue, rcu); } static void nvmet_fc_tgt_q_put(struct nvmet_fc_tgt_queue *queue) { kref_put(&queue->ref, nvmet_fc_tgt_queue_free); } static int nvmet_fc_tgt_q_get(struct nvmet_fc_tgt_queue *queue) { return kref_get_unless_zero(&queue->ref); } static void nvmet_fc_delete_target_queue(struct nvmet_fc_tgt_queue *queue) { struct nvmet_fc_tgtport *tgtport = queue->assoc->tgtport; struct nvmet_fc_fcp_iod *fod = queue->fod; struct nvmet_fc_defer_fcp_req *deferfcp, *tempptr; unsigned long flags; int i; bool disconnect; disconnect = atomic_xchg(&queue->connected, 0); /* if not connected, nothing to do */ if (!disconnect) return; spin_lock_irqsave(&queue->qlock, flags); /* abort outstanding io's */ for (i = 0; i < queue->sqsize; fod++, i++) { if (fod->active) { spin_lock(&fod->flock); fod->abort = true; /* * only call lldd abort routine if waiting for * writedata. other outstanding ops should finish * on their own. 
*/ if (fod->writedataactive) { fod->aborted = true; spin_unlock(&fod->flock); tgtport->ops->fcp_abort( &tgtport->fc_target_port, fod->fcpreq); } else spin_unlock(&fod->flock); } } /* Cleanup defer'ed IOs in queue */ list_for_each_entry_safe(deferfcp, tempptr, &queue->avail_defer_list, req_list) { list_del(&deferfcp->req_list); kfree(deferfcp); } for (;;) { deferfcp = list_first_entry_or_null(&queue->pending_cmd_list, struct nvmet_fc_defer_fcp_req, req_list); if (!deferfcp) break; list_del(&deferfcp->req_list); spin_unlock_irqrestore(&queue->qlock, flags); tgtport->ops->defer_rcv(&tgtport->fc_target_port, deferfcp->fcp_req); tgtport->ops->fcp_abort(&tgtport->fc_target_port, deferfcp->fcp_req); tgtport->ops->fcp_req_release(&tgtport->fc_target_port, deferfcp->fcp_req); /* release the queue lookup reference */ nvmet_fc_tgt_q_put(queue); kfree(deferfcp); spin_lock_irqsave(&queue->qlock, flags); } spin_unlock_irqrestore(&queue->qlock, flags); flush_workqueue(queue->work_q); nvmet_sq_destroy(&queue->nvme_sq); nvmet_fc_tgt_q_put(queue); } static struct nvmet_fc_tgt_queue * nvmet_fc_find_target_queue(struct nvmet_fc_tgtport *tgtport, u64 connection_id) { struct nvmet_fc_tgt_assoc *assoc; struct nvmet_fc_tgt_queue *queue; u64 association_id = nvmet_fc_getassociationid(connection_id); u16 qid = nvmet_fc_getqueueid(connection_id); if (qid > NVMET_NR_QUEUES) return NULL; rcu_read_lock(); list_for_each_entry_rcu(assoc, &tgtport->assoc_list, a_list) { if (association_id == assoc->association_id) { queue = rcu_dereference(assoc->queues[qid]); if (queue && (!atomic_read(&queue->connected) || !nvmet_fc_tgt_q_get(queue))) queue = NULL; rcu_read_unlock(); return queue; } } rcu_read_unlock(); return NULL; } static void nvmet_fc_hostport_free(struct kref *ref) { struct nvmet_fc_hostport *hostport = container_of(ref, struct nvmet_fc_hostport, ref); struct nvmet_fc_tgtport *tgtport = hostport->tgtport; unsigned long flags; spin_lock_irqsave(&tgtport->lock, flags); list_del(&hostport->host_list); spin_unlock_irqrestore(&tgtport->lock, flags); if (tgtport->ops->host_release && hostport->invalid) tgtport->ops->host_release(hostport->hosthandle); kfree(hostport); nvmet_fc_tgtport_put(tgtport); } static void nvmet_fc_hostport_put(struct nvmet_fc_hostport *hostport) { kref_put(&hostport->ref, nvmet_fc_hostport_free); } static int nvmet_fc_hostport_get(struct nvmet_fc_hostport *hostport) { return kref_get_unless_zero(&hostport->ref); } static void nvmet_fc_free_hostport(struct nvmet_fc_hostport *hostport) { /* if LLDD not implemented, leave as NULL */ if (!hostport || !hostport->hosthandle) return; nvmet_fc_hostport_put(hostport); } static struct nvmet_fc_hostport * nvmet_fc_match_hostport(struct nvmet_fc_tgtport *tgtport, void *hosthandle) { struct nvmet_fc_hostport *host; lockdep_assert_held(&tgtport->lock); list_for_each_entry(host, &tgtport->host_list, host_list) { if (host->hosthandle == hosthandle && !host->invalid) { if (nvmet_fc_hostport_get(host)) return (host); } } return NULL; } static struct nvmet_fc_hostport * nvmet_fc_alloc_hostport(struct nvmet_fc_tgtport *tgtport, void *hosthandle) { struct nvmet_fc_hostport *newhost, *match = NULL; unsigned long flags; /* if LLDD not implemented, leave as NULL */ if (!hosthandle) return NULL; /* * take reference for what will be the newly allocated hostport if * we end up using a new allocation */ if (!nvmet_fc_tgtport_get(tgtport)) return ERR_PTR(-EINVAL); spin_lock_irqsave(&tgtport->lock, flags); match = nvmet_fc_match_hostport(tgtport, hosthandle); 
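/*
 * Illustrative sketch, not part of the original driver:
 * nvmet_fc_alloc_hostport() looks the handle up under tgtport->lock, drops
 * the lock for the sleeping allocation, then re-checks under the lock and
 * frees the new node if another context won the race. The standalone
 * userspace sketch below (hypothetical demo_* names, a pthread mutex in
 * place of the spinlock, reference counting omitted, wrapped in "#if 0" so
 * it is never built as part of this file) shows the same
 * check/alloc/re-check pattern.
 */
#if 0
#include <stdio.h>
#include <stdlib.h>
#include <pthread.h>

struct demo_host {
	void *handle;
	struct demo_host *next;
};

static struct demo_host *demo_list;
static pthread_mutex_t demo_lock = PTHREAD_MUTEX_INITIALIZER;

static struct demo_host *demo_find(void *handle)
{
	struct demo_host *h;

	for (h = demo_list; h; h = h->next)
		if (h->handle == handle)
			return h;
	return NULL;
}

static struct demo_host *demo_get_or_create(void *handle)
{
	struct demo_host *newhost, *match;

	pthread_mutex_lock(&demo_lock);
	match = demo_find(handle);
	pthread_mutex_unlock(&demo_lock);
	if (match)
		return match;

	newhost = calloc(1, sizeof(*newhost));	/* may "sleep" - lock dropped */
	if (!newhost)
		return NULL;

	pthread_mutex_lock(&demo_lock);
	match = demo_find(handle);
	if (match) {
		free(newhost);			/* lost the race - reuse existing */
		newhost = match;
	} else {
		newhost->handle = handle;
		newhost->next = demo_list;
		demo_list = newhost;
	}
	pthread_mutex_unlock(&demo_lock);
	return newhost;
}

int main(void)
{
	int token;
	struct demo_host *a = demo_get_or_create(&token);
	struct demo_host *b = demo_get_or_create(&token);

	printf("same hostport reused: %s\n", a == b ? "yes" : "no");
	return 0;
}
#endif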
spin_unlock_irqrestore(&tgtport->lock, flags); if (match) { /* no new allocation - release reference */ nvmet_fc_tgtport_put(tgtport); return match; } newhost = kzalloc(sizeof(*newhost), GFP_KERNEL); if (!newhost) { /* no new allocation - release reference */ nvmet_fc_tgtport_put(tgtport); return ERR_PTR(-ENOMEM); } spin_lock_irqsave(&tgtport->lock, flags); match = nvmet_fc_match_hostport(tgtport, hosthandle); if (match) { /* new allocation not needed */ kfree(newhost); newhost = match; /* no new allocation - release reference */ nvmet_fc_tgtport_put(tgtport); } else { newhost->tgtport = tgtport; newhost->hosthandle = hosthandle; INIT_LIST_HEAD(&newhost->host_list); kref_init(&newhost->ref); list_add_tail(&newhost->host_list, &tgtport->host_list); } spin_unlock_irqrestore(&tgtport->lock, flags); return newhost; } static void nvmet_fc_delete_assoc(struct work_struct *work) { struct nvmet_fc_tgt_assoc *assoc = container_of(work, struct nvmet_fc_tgt_assoc, del_work); nvmet_fc_delete_target_assoc(assoc); nvmet_fc_tgt_a_put(assoc); } static struct nvmet_fc_tgt_assoc * nvmet_fc_alloc_target_assoc(struct nvmet_fc_tgtport *tgtport, void *hosthandle) { struct nvmet_fc_tgt_assoc *assoc, *tmpassoc; unsigned long flags; u64 ran; int idx; bool needrandom = true; assoc = kzalloc(sizeof(*assoc), GFP_KERNEL); if (!assoc) return NULL; idx = ida_alloc(&tgtport->assoc_cnt, GFP_KERNEL); if (idx < 0) goto out_free_assoc; if (!nvmet_fc_tgtport_get(tgtport)) goto out_ida; assoc->hostport = nvmet_fc_alloc_hostport(tgtport, hosthandle); if (IS_ERR(assoc->hostport)) goto out_put; assoc->tgtport = tgtport; assoc->a_id = idx; INIT_LIST_HEAD(&assoc->a_list); kref_init(&assoc->ref); INIT_WORK(&assoc->del_work, nvmet_fc_delete_assoc); atomic_set(&assoc->terminating, 0); while (needrandom) { get_random_bytes(&ran, sizeof(ran) - BYTES_FOR_QID); ran = ran << BYTES_FOR_QID_SHIFT; spin_lock_irqsave(&tgtport->lock, flags); needrandom = false; list_for_each_entry(tmpassoc, &tgtport->assoc_list, a_list) { if (ran == tmpassoc->association_id) { needrandom = true; break; } } if (!needrandom) { assoc->association_id = ran; list_add_tail_rcu(&assoc->a_list, &tgtport->assoc_list); } spin_unlock_irqrestore(&tgtport->lock, flags); } return assoc; out_put: nvmet_fc_tgtport_put(tgtport); out_ida: ida_free(&tgtport->assoc_cnt, idx); out_free_assoc: kfree(assoc); return NULL; } static void nvmet_fc_target_assoc_free(struct kref *ref) { struct nvmet_fc_tgt_assoc *assoc = container_of(ref, struct nvmet_fc_tgt_assoc, ref); struct nvmet_fc_tgtport *tgtport = assoc->tgtport; struct nvmet_fc_ls_iod *oldls; unsigned long flags; /* Send Disconnect now that all i/o has completed */ nvmet_fc_xmt_disconnect_assoc(assoc); nvmet_fc_free_hostport(assoc->hostport); spin_lock_irqsave(&tgtport->lock, flags); list_del_rcu(&assoc->a_list); oldls = assoc->rcv_disconn; spin_unlock_irqrestore(&tgtport->lock, flags); /* if pending Rcv Disconnect Association LS, send rsp now */ if (oldls) nvmet_fc_xmt_ls_rsp(tgtport, oldls); ida_free(&tgtport->assoc_cnt, assoc->a_id); dev_info(tgtport->dev, "{%d:%d} Association freed\n", tgtport->fc_target_port.port_num, assoc->a_id); kfree_rcu(assoc, rcu); nvmet_fc_tgtport_put(tgtport); } static void nvmet_fc_tgt_a_put(struct nvmet_fc_tgt_assoc *assoc) { kref_put(&assoc->ref, nvmet_fc_target_assoc_free); } static int nvmet_fc_tgt_a_get(struct nvmet_fc_tgt_assoc *assoc) { return kref_get_unless_zero(&assoc->ref); } static void nvmet_fc_delete_target_assoc(struct nvmet_fc_tgt_assoc *assoc) { struct nvmet_fc_tgtport *tgtport = 
assoc->tgtport; struct nvmet_fc_tgt_queue *queue; int i, terminating; terminating = atomic_xchg(&assoc->terminating, 1); /* if already terminating, do nothing */ if (terminating) return; for (i = NVMET_NR_QUEUES; i >= 0; i--) { rcu_read_lock(); queue = rcu_dereference(assoc->queues[i]); if (!queue) { rcu_read_unlock(); continue; } if (!nvmet_fc_tgt_q_get(queue)) { rcu_read_unlock(); continue; } rcu_read_unlock(); nvmet_fc_delete_target_queue(queue); nvmet_fc_tgt_q_put(queue); } dev_info(tgtport->dev, "{%d:%d} Association deleted\n", tgtport->fc_target_port.port_num, assoc->a_id); nvmet_fc_tgt_a_put(assoc); } static struct nvmet_fc_tgt_assoc * nvmet_fc_find_target_assoc(struct nvmet_fc_tgtport *tgtport, u64 association_id) { struct nvmet_fc_tgt_assoc *assoc; struct nvmet_fc_tgt_assoc *ret = NULL; rcu_read_lock(); list_for_each_entry_rcu(assoc, &tgtport->assoc_list, a_list) { if (association_id == assoc->association_id) { ret = assoc; if (!nvmet_fc_tgt_a_get(assoc)) ret = NULL; break; } } rcu_read_unlock(); return ret; } static void nvmet_fc_portentry_bind(struct nvmet_fc_tgtport *tgtport, struct nvmet_fc_port_entry *pe, struct nvmet_port *port) { lockdep_assert_held(&nvmet_fc_tgtlock); pe->tgtport = tgtport; tgtport->pe = pe; pe->port = port; port->priv = pe; pe->node_name = tgtport->fc_target_port.node_name; pe->port_name = tgtport->fc_target_port.port_name; INIT_LIST_HEAD(&pe->pe_list); list_add_tail(&pe->pe_list, &nvmet_fc_portentry_list); } static void nvmet_fc_portentry_unbind(struct nvmet_fc_port_entry *pe) { unsigned long flags; spin_lock_irqsave(&nvmet_fc_tgtlock, flags); if (pe->tgtport) pe->tgtport->pe = NULL; list_del(&pe->pe_list); spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags); } /* * called when a targetport deregisters. Breaks the relationship * with the nvmet port, but leaves the port_entry in place so that * re-registration can resume operation. */ static void nvmet_fc_portentry_unbind_tgt(struct nvmet_fc_tgtport *tgtport) { struct nvmet_fc_port_entry *pe; unsigned long flags; spin_lock_irqsave(&nvmet_fc_tgtlock, flags); pe = tgtport->pe; if (pe) pe->tgtport = NULL; tgtport->pe = NULL; spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags); } /* * called when a new targetport is registered. Looks in the * existing nvmet port_entries to see if the nvmet layer is * configured for the targetport's wwn's. (the targetport existed, * nvmet configured, the lldd unregistered the tgtport, and is now * reregistering the same targetport). If so, set the nvmet port * port entry on the targetport. */ static void nvmet_fc_portentry_rebind_tgt(struct nvmet_fc_tgtport *tgtport) { struct nvmet_fc_port_entry *pe; unsigned long flags; spin_lock_irqsave(&nvmet_fc_tgtlock, flags); list_for_each_entry(pe, &nvmet_fc_portentry_list, pe_list) { if (tgtport->fc_target_port.node_name == pe->node_name && tgtport->fc_target_port.port_name == pe->port_name) { WARN_ON(pe->tgtport); tgtport->pe = pe; pe->tgtport = tgtport; break; } } spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags); } /** * nvmet_fc_register_targetport - transport entry point called by an * LLDD to register the existence of a local * NVME subystem FC port. * @pinfo: pointer to information about the port to be registered * @template: LLDD entrypoints and operational parameters for the port * @dev: physical hardware device node port corresponds to. Will be * used for DMA mappings * @portptr: pointer to a local port pointer. 
Upon success, the routine * will allocate a nvme_fc_local_port structure and place its * address in the local port pointer. Upon failure, local port * pointer will be set to NULL. * * Returns: * a completion status. Must be 0 upon success; a negative errno * (ex: -ENXIO) upon failure. */ int nvmet_fc_register_targetport(struct nvmet_fc_port_info *pinfo, struct nvmet_fc_target_template *template, struct device *dev, struct nvmet_fc_target_port **portptr) { struct nvmet_fc_tgtport *newrec; unsigned long flags; int ret, idx; if (!template->xmt_ls_rsp || !template->fcp_op || !template->fcp_abort || !template->fcp_req_release || !template->targetport_delete || !template->max_hw_queues || !template->max_sgl_segments || !template->max_dif_sgl_segments || !template->dma_boundary) { ret = -EINVAL; goto out_regtgt_failed; } newrec = kzalloc((sizeof(*newrec) + template->target_priv_sz), GFP_KERNEL); if (!newrec) { ret = -ENOMEM; goto out_regtgt_failed; } idx = ida_alloc(&nvmet_fc_tgtport_cnt, GFP_KERNEL); if (idx < 0) { ret = -ENOSPC; goto out_fail_kfree; } if (!get_device(dev) && dev) { ret = -ENODEV; goto out_ida_put; } newrec->fc_target_port.node_name = pinfo->node_name; newrec->fc_target_port.port_name = pinfo->port_name; if (template->target_priv_sz) newrec->fc_target_port.private = &newrec[1]; else newrec->fc_target_port.private = NULL; newrec->fc_target_port.port_id = pinfo->port_id; newrec->fc_target_port.port_num = idx; INIT_LIST_HEAD(&newrec->tgt_list); newrec->dev = dev; newrec->ops = template; spin_lock_init(&newrec->lock); INIT_LIST_HEAD(&newrec->ls_rcv_list); INIT_LIST_HEAD(&newrec->ls_req_list); INIT_LIST_HEAD(&newrec->ls_busylist); INIT_LIST_HEAD(&newrec->assoc_list); INIT_LIST_HEAD(&newrec->host_list); kref_init(&newrec->ref); ida_init(&newrec->assoc_cnt); newrec->max_sg_cnt = template->max_sgl_segments; ret = nvmet_fc_alloc_ls_iodlist(newrec); if (ret) { ret = -ENOMEM; goto out_free_newrec; } nvmet_fc_portentry_rebind_tgt(newrec); spin_lock_irqsave(&nvmet_fc_tgtlock, flags); list_add_tail(&newrec->tgt_list, &nvmet_fc_target_list); spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags); *portptr = &newrec->fc_target_port; return 0; out_free_newrec: put_device(dev); out_ida_put: ida_free(&nvmet_fc_tgtport_cnt, idx); out_fail_kfree: kfree(newrec); out_regtgt_failed: *portptr = NULL; return ret; } EXPORT_SYMBOL_GPL(nvmet_fc_register_targetport); static void nvmet_fc_free_tgtport(struct kref *ref) { struct nvmet_fc_tgtport *tgtport = container_of(ref, struct nvmet_fc_tgtport, ref); struct device *dev = tgtport->dev; unsigned long flags; spin_lock_irqsave(&nvmet_fc_tgtlock, flags); list_del(&tgtport->tgt_list); spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags); nvmet_fc_free_ls_iodlist(tgtport); /* let the LLDD know we've finished tearing it down */ tgtport->ops->targetport_delete(&tgtport->fc_target_port); ida_free(&nvmet_fc_tgtport_cnt, tgtport->fc_target_port.port_num); ida_destroy(&tgtport->assoc_cnt); kfree(tgtport); put_device(dev); } static void nvmet_fc_tgtport_put(struct nvmet_fc_tgtport *tgtport) { kref_put(&tgtport->ref, nvmet_fc_free_tgtport); } static int nvmet_fc_tgtport_get(struct nvmet_fc_tgtport *tgtport) { return kref_get_unless_zero(&tgtport->ref); } static void __nvmet_fc_free_assocs(struct nvmet_fc_tgtport *tgtport) { struct nvmet_fc_tgt_assoc *assoc; rcu_read_lock(); list_for_each_entry_rcu(assoc, &tgtport->assoc_list, a_list) { if (!nvmet_fc_tgt_a_get(assoc)) continue; if (!queue_work(nvmet_wq, &assoc->del_work)) /* already deleting - release local reference */ 
nvmet_fc_tgt_a_put(assoc); } rcu_read_unlock(); } /** * nvmet_fc_invalidate_host - transport entry point called by an LLDD * to remove references to a hosthandle for LS's. * * The nvmet-fc layer ensures that any references to the hosthandle * on the targetport are forgotten (set to NULL). The LLDD will * typically call this when a login with a remote host port has been * lost, thus LS's for the remote host port are no longer possible. * * If an LS request is outstanding to the targetport/hosthandle (or * issued concurrently with the call to invalidate the host), the * LLDD is responsible for terminating/aborting the LS and completing * the LS request. It is recommended that these terminations/aborts * occur after calling to invalidate the host handle to avoid additional * retries by the nvmet-fc transport. The nvmet-fc transport may * continue to reference host handle while it cleans up outstanding * NVME associations. The nvmet-fc transport will call the * ops->host_release() callback to notify the LLDD that all references * are complete and the related host handle can be recovered. * Note: if there are no references, the callback may be called before * the invalidate host call returns. * * @target_port: pointer to the (registered) target port that a prior * LS was received on and which supplied the transport the * hosthandle. * @hosthandle: the handle (pointer) that represents the host port * that no longer has connectivity and that LS's should * no longer be directed to. */ void nvmet_fc_invalidate_host(struct nvmet_fc_target_port *target_port, void *hosthandle) { struct nvmet_fc_tgtport *tgtport = targetport_to_tgtport(target_port); struct nvmet_fc_tgt_assoc *assoc, *next; unsigned long flags; bool noassoc = true; spin_lock_irqsave(&tgtport->lock, flags); list_for_each_entry_safe(assoc, next, &tgtport->assoc_list, a_list) { if (!assoc->hostport || assoc->hostport->hosthandle != hosthandle) continue; if (!nvmet_fc_tgt_a_get(assoc)) continue; assoc->hostport->invalid = 1; noassoc = false; if (!queue_work(nvmet_wq, &assoc->del_work)) /* already deleting - release local reference */ nvmet_fc_tgt_a_put(assoc); } spin_unlock_irqrestore(&tgtport->lock, flags); /* if there's nothing to wait for - call the callback */ if (noassoc && tgtport->ops->host_release) tgtport->ops->host_release(hosthandle); } EXPORT_SYMBOL_GPL(nvmet_fc_invalidate_host); /* * nvmet layer has called to terminate an association */ static void nvmet_fc_delete_ctrl(struct nvmet_ctrl *ctrl) { struct nvmet_fc_tgtport *tgtport, *next; struct nvmet_fc_tgt_assoc *assoc; struct nvmet_fc_tgt_queue *queue; unsigned long flags; bool found_ctrl = false; /* this is a bit ugly, but don't want to make locks layered */ spin_lock_irqsave(&nvmet_fc_tgtlock, flags); list_for_each_entry_safe(tgtport, next, &nvmet_fc_target_list, tgt_list) { if (!nvmet_fc_tgtport_get(tgtport)) continue; spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags); rcu_read_lock(); list_for_each_entry_rcu(assoc, &tgtport->assoc_list, a_list) { queue = rcu_dereference(assoc->queues[0]); if (queue && queue->nvme_sq.ctrl == ctrl) { if (nvmet_fc_tgt_a_get(assoc)) found_ctrl = true; break; } } rcu_read_unlock(); nvmet_fc_tgtport_put(tgtport); if (found_ctrl) { if (!queue_work(nvmet_wq, &assoc->del_work)) /* already deleting - release local reference */ nvmet_fc_tgt_a_put(assoc); return; } spin_lock_irqsave(&nvmet_fc_tgtlock, flags); } spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags); } /** * nvmet_fc_unregister_targetport - transport entry point called by an * LLDD to 
deregister/remove a previously * registered a local NVME subsystem FC port. * @target_port: pointer to the (registered) target port that is to be * deregistered. * * Returns: * a completion status. Must be 0 upon success; a negative errno * (ex: -ENXIO) upon failure. */ int nvmet_fc_unregister_targetport(struct nvmet_fc_target_port *target_port) { struct nvmet_fc_tgtport *tgtport = targetport_to_tgtport(target_port); nvmet_fc_portentry_unbind_tgt(tgtport); /* terminate any outstanding associations */ __nvmet_fc_free_assocs(tgtport); /* * should terminate LS's as well. However, LS's will be generated * at the tail end of association termination, so they likely don't * exist yet. And even if they did, it's worthwhile to just let * them finish and targetport ref counting will clean things up. */ nvmet_fc_tgtport_put(tgtport); return 0; } EXPORT_SYMBOL_GPL(nvmet_fc_unregister_targetport); /* ********************** FC-NVME LS RCV Handling ************************* */ static void nvmet_fc_ls_create_association(struct nvmet_fc_tgtport *tgtport, struct nvmet_fc_ls_iod *iod) { struct fcnvme_ls_cr_assoc_rqst *rqst = &iod->rqstbuf->rq_cr_assoc; struct fcnvme_ls_cr_assoc_acc *acc = &iod->rspbuf->rsp_cr_assoc; struct nvmet_fc_tgt_queue *queue; int ret = 0; memset(acc, 0, sizeof(*acc)); /* * FC-NVME spec changes. There are initiators sending different * lengths as padding sizes for Create Association Cmd descriptor * was incorrect. * Accept anything of "minimum" length. Assume format per 1.15 * spec (with HOSTID reduced to 16 bytes), ignore how long the * trailing pad length is. */ if (iod->rqstdatalen < FCNVME_LSDESC_CRA_RQST_MINLEN) ret = VERR_CR_ASSOC_LEN; else if (be32_to_cpu(rqst->desc_list_len) < FCNVME_LSDESC_CRA_RQST_MIN_LISTLEN) ret = VERR_CR_ASSOC_RQST_LEN; else if (rqst->assoc_cmd.desc_tag != cpu_to_be32(FCNVME_LSDESC_CREATE_ASSOC_CMD)) ret = VERR_CR_ASSOC_CMD; else if (be32_to_cpu(rqst->assoc_cmd.desc_len) < FCNVME_LSDESC_CRA_CMD_DESC_MIN_DESCLEN) ret = VERR_CR_ASSOC_CMD_LEN; else if (!rqst->assoc_cmd.ersp_ratio || (be16_to_cpu(rqst->assoc_cmd.ersp_ratio) >= be16_to_cpu(rqst->assoc_cmd.sqsize))) ret = VERR_ERSP_RATIO; else { /* new association w/ admin queue */ iod->assoc = nvmet_fc_alloc_target_assoc( tgtport, iod->hosthandle); if (!iod->assoc) ret = VERR_ASSOC_ALLOC_FAIL; else { queue = nvmet_fc_alloc_target_queue(iod->assoc, 0, be16_to_cpu(rqst->assoc_cmd.sqsize)); if (!queue) { ret = VERR_QUEUE_ALLOC_FAIL; nvmet_fc_tgt_a_put(iod->assoc); } } } if (ret) { dev_err(tgtport->dev, "Create Association LS failed: %s\n", validation_errors[ret]); iod->lsrsp->rsplen = nvme_fc_format_rjt(acc, sizeof(*acc), rqst->w0.ls_cmd, FCNVME_RJT_RC_LOGIC, FCNVME_RJT_EXP_NONE, 0); return; } queue->ersp_ratio = be16_to_cpu(rqst->assoc_cmd.ersp_ratio); atomic_set(&queue->connected, 1); queue->sqhd = 0; /* best place to init value */ dev_info(tgtport->dev, "{%d:%d} Association created\n", tgtport->fc_target_port.port_num, iod->assoc->a_id); /* format a response */ iod->lsrsp->rsplen = sizeof(*acc); nvme_fc_format_rsp_hdr(acc, FCNVME_LS_ACC, fcnvme_lsdesc_len( sizeof(struct fcnvme_ls_cr_assoc_acc)), FCNVME_LS_CREATE_ASSOCIATION); acc->associd.desc_tag = cpu_to_be32(FCNVME_LSDESC_ASSOC_ID); acc->associd.desc_len = fcnvme_lsdesc_len( sizeof(struct fcnvme_lsdesc_assoc_id)); acc->associd.association_id = cpu_to_be64(nvmet_fc_makeconnid(iod->assoc, 0)); acc->connectid.desc_tag = cpu_to_be32(FCNVME_LSDESC_CONN_ID); acc->connectid.desc_len = fcnvme_lsdesc_len( sizeof(struct fcnvme_lsdesc_conn_id)); 
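/*
 * Illustrative sketch, not part of the original driver: the accept payload
 * built here carries the association id produced by nvmet_fc_makeconnid(),
 * where the random association id occupies the upper 6 bytes, the queue id
 * the lower 2 bytes, and qid 0 makes connection id == association id. The
 * standalone userspace sketch below (hypothetical demo_* names, wrapped in
 * "#if 0" so it is never built as part of this file) shows that packing and
 * unpacking.
 */
#if 0
#include <stdio.h>
#include <stdint.h>

#define DEMO_QID_BITS	16
#define DEMO_QID_MASK	((uint64_t)((1 << DEMO_QID_BITS) - 1))

static uint64_t demo_makeconnid(uint64_t assoc_id, uint16_t qid)
{
	return assoc_id | qid;		/* same or-in as nvmet_fc_makeconnid() */
}

int main(void)
{
	uint64_t assoc_id = 0x123456789abc0000ULL;	/* low 2 bytes zero */
	uint64_t conn_admin = demo_makeconnid(assoc_id, 0);
	uint64_t conn_io = demo_makeconnid(assoc_id, 3);

	printf("admin connection id == association id: %d\n",
	       conn_admin == assoc_id);
	printf("io connection: assoc %#llx qid %u\n",
	       (unsigned long long)(conn_io & ~DEMO_QID_MASK),
	       (unsigned int)(conn_io & DEMO_QID_MASK));
	return 0;
}
#endif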
acc->connectid.connection_id = acc->associd.association_id; } static void nvmet_fc_ls_create_connection(struct nvmet_fc_tgtport *tgtport, struct nvmet_fc_ls_iod *iod) { struct fcnvme_ls_cr_conn_rqst *rqst = &iod->rqstbuf->rq_cr_conn; struct fcnvme_ls_cr_conn_acc *acc = &iod->rspbuf->rsp_cr_conn; struct nvmet_fc_tgt_queue *queue; int ret = 0; memset(acc, 0, sizeof(*acc)); if (iod->rqstdatalen < sizeof(struct fcnvme_ls_cr_conn_rqst)) ret = VERR_CR_CONN_LEN; else if (rqst->desc_list_len != fcnvme_lsdesc_len( sizeof(struct fcnvme_ls_cr_conn_rqst))) ret = VERR_CR_CONN_RQST_LEN; else if (rqst->associd.desc_tag != cpu_to_be32(FCNVME_LSDESC_ASSOC_ID)) ret = VERR_ASSOC_ID; else if (rqst->associd.desc_len != fcnvme_lsdesc_len( sizeof(struct fcnvme_lsdesc_assoc_id))) ret = VERR_ASSOC_ID_LEN; else if (rqst->connect_cmd.desc_tag != cpu_to_be32(FCNVME_LSDESC_CREATE_CONN_CMD)) ret = VERR_CR_CONN_CMD; else if (rqst->connect_cmd.desc_len != fcnvme_lsdesc_len( sizeof(struct fcnvme_lsdesc_cr_conn_cmd))) ret = VERR_CR_CONN_CMD_LEN; else if (!rqst->connect_cmd.ersp_ratio || (be16_to_cpu(rqst->connect_cmd.ersp_ratio) >= be16_to_cpu(rqst->connect_cmd.sqsize))) ret = VERR_ERSP_RATIO; else { /* new io queue */ iod->assoc = nvmet_fc_find_target_assoc(tgtport, be64_to_cpu(rqst->associd.association_id)); if (!iod->assoc) ret = VERR_NO_ASSOC; else { queue = nvmet_fc_alloc_target_queue(iod->assoc, be16_to_cpu(rqst->connect_cmd.qid), be16_to_cpu(rqst->connect_cmd.sqsize)); if (!queue) ret = VERR_QUEUE_ALLOC_FAIL; /* release get taken in nvmet_fc_find_target_assoc */ nvmet_fc_tgt_a_put(iod->assoc); } } if (ret) { dev_err(tgtport->dev, "Create Connection LS failed: %s\n", validation_errors[ret]); iod->lsrsp->rsplen = nvme_fc_format_rjt(acc, sizeof(*acc), rqst->w0.ls_cmd, (ret == VERR_NO_ASSOC) ? FCNVME_RJT_RC_INV_ASSOC : FCNVME_RJT_RC_LOGIC, FCNVME_RJT_EXP_NONE, 0); return; } queue->ersp_ratio = be16_to_cpu(rqst->connect_cmd.ersp_ratio); atomic_set(&queue->connected, 1); queue->sqhd = 0; /* best place to init value */ /* format a response */ iod->lsrsp->rsplen = sizeof(*acc); nvme_fc_format_rsp_hdr(acc, FCNVME_LS_ACC, fcnvme_lsdesc_len(sizeof(struct fcnvme_ls_cr_conn_acc)), FCNVME_LS_CREATE_CONNECTION); acc->connectid.desc_tag = cpu_to_be32(FCNVME_LSDESC_CONN_ID); acc->connectid.desc_len = fcnvme_lsdesc_len( sizeof(struct fcnvme_lsdesc_conn_id)); acc->connectid.connection_id = cpu_to_be64(nvmet_fc_makeconnid(iod->assoc, be16_to_cpu(rqst->connect_cmd.qid))); } /* * Returns true if the LS response is to be transmit * Returns false if the LS response is to be delayed */ static int nvmet_fc_ls_disconnect(struct nvmet_fc_tgtport *tgtport, struct nvmet_fc_ls_iod *iod) { struct fcnvme_ls_disconnect_assoc_rqst *rqst = &iod->rqstbuf->rq_dis_assoc; struct fcnvme_ls_disconnect_assoc_acc *acc = &iod->rspbuf->rsp_dis_assoc; struct nvmet_fc_tgt_assoc *assoc = NULL; struct nvmet_fc_ls_iod *oldls = NULL; unsigned long flags; int ret = 0; memset(acc, 0, sizeof(*acc)); ret = nvmefc_vldt_lsreq_discon_assoc(iod->rqstdatalen, rqst); if (!ret) { /* match an active association - takes an assoc ref if !NULL */ assoc = nvmet_fc_find_target_assoc(tgtport, be64_to_cpu(rqst->associd.association_id)); iod->assoc = assoc; if (!assoc) ret = VERR_NO_ASSOC; } if (ret || !assoc) { dev_err(tgtport->dev, "Disconnect LS failed: %s\n", validation_errors[ret]); iod->lsrsp->rsplen = nvme_fc_format_rjt(acc, sizeof(*acc), rqst->w0.ls_cmd, (ret == VERR_NO_ASSOC) ? 
FCNVME_RJT_RC_INV_ASSOC : FCNVME_RJT_RC_LOGIC, FCNVME_RJT_EXP_NONE, 0); return true; } /* format a response */ iod->lsrsp->rsplen = sizeof(*acc); nvme_fc_format_rsp_hdr(acc, FCNVME_LS_ACC, fcnvme_lsdesc_len( sizeof(struct fcnvme_ls_disconnect_assoc_acc)), FCNVME_LS_DISCONNECT_ASSOC); /* release get taken in nvmet_fc_find_target_assoc */ nvmet_fc_tgt_a_put(assoc); /* * The rules for LS response says the response cannot * go back until ABTS's have been sent for all outstanding * I/O and a Disconnect Association LS has been sent. * So... save off the Disconnect LS to send the response * later. If there was a prior LS already saved, replace * it with the newer one and send a can't perform reject * on the older one. */ spin_lock_irqsave(&tgtport->lock, flags); oldls = assoc->rcv_disconn; assoc->rcv_disconn = iod; spin_unlock_irqrestore(&tgtport->lock, flags); nvmet_fc_delete_target_assoc(assoc); if (oldls) { dev_info(tgtport->dev, "{%d:%d} Multiple Disconnect Association LS's " "received\n", tgtport->fc_target_port.port_num, assoc->a_id); /* overwrite good response with bogus failure */ oldls->lsrsp->rsplen = nvme_fc_format_rjt(oldls->rspbuf, sizeof(*iod->rspbuf), /* ok to use rqst, LS is same */ rqst->w0.ls_cmd, FCNVME_RJT_RC_UNAB, FCNVME_RJT_EXP_NONE, 0); nvmet_fc_xmt_ls_rsp(tgtport, oldls); } return false; } /* *********************** NVME Ctrl Routines **************************** */ static void nvmet_fc_fcp_nvme_cmd_done(struct nvmet_req *nvme_req); static const struct nvmet_fabrics_ops nvmet_fc_tgt_fcp_ops; static void nvmet_fc_xmt_ls_rsp_done(struct nvmefc_ls_rsp *lsrsp) { struct nvmet_fc_ls_iod *iod = lsrsp->nvme_fc_private; struct nvmet_fc_tgtport *tgtport = iod->tgtport; fc_dma_sync_single_for_cpu(tgtport->dev, iod->rspdma, sizeof(*iod->rspbuf), DMA_TO_DEVICE); nvmet_fc_free_ls_iod(tgtport, iod); nvmet_fc_tgtport_put(tgtport); } static void nvmet_fc_xmt_ls_rsp(struct nvmet_fc_tgtport *tgtport, struct nvmet_fc_ls_iod *iod) { int ret; fc_dma_sync_single_for_device(tgtport->dev, iod->rspdma, sizeof(*iod->rspbuf), DMA_TO_DEVICE); ret = tgtport->ops->xmt_ls_rsp(&tgtport->fc_target_port, iod->lsrsp); if (ret) nvmet_fc_xmt_ls_rsp_done(iod->lsrsp); } /* * Actual processing routine for received FC-NVME LS Requests from the LLD */ static void nvmet_fc_handle_ls_rqst(struct nvmet_fc_tgtport *tgtport, struct nvmet_fc_ls_iod *iod) { struct fcnvme_ls_rqst_w0 *w0 = &iod->rqstbuf->rq_cr_assoc.w0; bool sendrsp = true; iod->lsrsp->nvme_fc_private = iod; iod->lsrsp->rspbuf = iod->rspbuf; iod->lsrsp->rspdma = iod->rspdma; iod->lsrsp->done = nvmet_fc_xmt_ls_rsp_done; /* Be preventative. 
handlers will later set to valid length */ iod->lsrsp->rsplen = 0; iod->assoc = NULL; /* * handlers: * parse request input, execute the request, and format the * LS response */ switch (w0->ls_cmd) { case FCNVME_LS_CREATE_ASSOCIATION: /* Creates Association and initial Admin Queue/Connection */ nvmet_fc_ls_create_association(tgtport, iod); break; case FCNVME_LS_CREATE_CONNECTION: /* Creates an IO Queue/Connection */ nvmet_fc_ls_create_connection(tgtport, iod); break; case FCNVME_LS_DISCONNECT_ASSOC: /* Terminate a Queue/Connection or the Association */ sendrsp = nvmet_fc_ls_disconnect(tgtport, iod); break; default: iod->lsrsp->rsplen = nvme_fc_format_rjt(iod->rspbuf, sizeof(*iod->rspbuf), w0->ls_cmd, FCNVME_RJT_RC_INVAL, FCNVME_RJT_EXP_NONE, 0); } if (sendrsp) nvmet_fc_xmt_ls_rsp(tgtport, iod); } /* * Actual processing routine for received FC-NVME LS Requests from the LLD */ static void nvmet_fc_handle_ls_rqst_work(struct work_struct *work) { struct nvmet_fc_ls_iod *iod = container_of(work, struct nvmet_fc_ls_iod, work); struct nvmet_fc_tgtport *tgtport = iod->tgtport; nvmet_fc_handle_ls_rqst(tgtport, iod); } /** * nvmet_fc_rcv_ls_req - transport entry point called by an LLDD * upon the reception of a NVME LS request. * * The nvmet-fc layer will copy payload to an internal structure for * processing. As such, upon completion of the routine, the LLDD may * immediately free/reuse the LS request buffer passed in the call. * * If this routine returns error, the LLDD should abort the exchange. * * @target_port: pointer to the (registered) target port the LS was * received on. * @hosthandle: pointer to the host specific data, gets stored in iod. * @lsrsp: pointer to a lsrsp structure to be used to reference * the exchange corresponding to the LS. * @lsreqbuf: pointer to the buffer containing the LS Request * @lsreqbuf_len: length, in bytes, of the received LS request */ int nvmet_fc_rcv_ls_req(struct nvmet_fc_target_port *target_port, void *hosthandle, struct nvmefc_ls_rsp *lsrsp, void *lsreqbuf, u32 lsreqbuf_len) { struct nvmet_fc_tgtport *tgtport = targetport_to_tgtport(target_port); struct nvmet_fc_ls_iod *iod; struct fcnvme_ls_rqst_w0 *w0 = (struct fcnvme_ls_rqst_w0 *)lsreqbuf; if (lsreqbuf_len > sizeof(union nvmefc_ls_requests)) { dev_info(tgtport->dev, "RCV %s LS failed: payload too large (%d)\n", (w0->ls_cmd <= NVME_FC_LAST_LS_CMD_VALUE) ? nvmefc_ls_names[w0->ls_cmd] : "", lsreqbuf_len); return -E2BIG; } if (!nvmet_fc_tgtport_get(tgtport)) { dev_info(tgtport->dev, "RCV %s LS failed: target deleting\n", (w0->ls_cmd <= NVME_FC_LAST_LS_CMD_VALUE) ? nvmefc_ls_names[w0->ls_cmd] : ""); return -ESHUTDOWN; } iod = nvmet_fc_alloc_ls_iod(tgtport); if (!iod) { dev_info(tgtport->dev, "RCV %s LS failed: context allocation failed\n", (w0->ls_cmd <= NVME_FC_LAST_LS_CMD_VALUE) ? nvmefc_ls_names[w0->ls_cmd] : ""); nvmet_fc_tgtport_put(tgtport); return -ENOENT; } iod->lsrsp = lsrsp; iod->fcpreq = NULL; memcpy(iod->rqstbuf, lsreqbuf, lsreqbuf_len); iod->rqstdatalen = lsreqbuf_len; iod->hosthandle = hosthandle; queue_work(nvmet_wq, &iod->work); return 0; } EXPORT_SYMBOL_GPL(nvmet_fc_rcv_ls_req); /* * ********************** * Start of FCP handling * ********************** */ static int nvmet_fc_alloc_tgt_pgs(struct nvmet_fc_fcp_iod *fod) { struct scatterlist *sg; unsigned int nent; sg = sgl_alloc(fod->req.transfer_len, GFP_KERNEL, &nent); if (!sg) goto out; fod->data_sg = sg; fod->data_sg_cnt = nent; fod->data_sg_cnt = fc_dma_map_sg(fod->tgtport->dev, sg, nent, ((fod->io_dir == NVMET_FCP_WRITE) ? 
DMA_FROM_DEVICE : DMA_TO_DEVICE)); /* note: write from initiator perspective */ fod->next_sg = fod->data_sg; return 0; out: return NVME_SC_INTERNAL; } static void nvmet_fc_free_tgt_pgs(struct nvmet_fc_fcp_iod *fod) { if (!fod->data_sg || !fod->data_sg_cnt) return; fc_dma_unmap_sg(fod->tgtport->dev, fod->data_sg, fod->data_sg_cnt, ((fod->io_dir == NVMET_FCP_WRITE) ? DMA_FROM_DEVICE : DMA_TO_DEVICE)); sgl_free(fod->data_sg); fod->data_sg = NULL; fod->data_sg_cnt = 0; } static bool queue_90percent_full(struct nvmet_fc_tgt_queue *q, u32 sqhd) { u32 sqtail, used; /* egad, this is ugly. And sqtail is just a best guess */ sqtail = atomic_read(&q->sqtail) % q->sqsize; used = (sqtail < sqhd) ? (sqtail + q->sqsize - sqhd) : (sqtail - sqhd); return ((used * 10) >= (((u32)(q->sqsize - 1) * 9))); } /* * Prep RSP payload. * May be a NVMET_FCOP_RSP or NVMET_FCOP_READDATA_RSP op */ static void nvmet_fc_prep_fcp_rsp(struct nvmet_fc_tgtport *tgtport, struct nvmet_fc_fcp_iod *fod) { struct nvme_fc_ersp_iu *ersp = &fod->rspiubuf; struct nvme_common_command *sqe = &fod->cmdiubuf.sqe.common; struct nvme_completion *cqe = &ersp->cqe; u32 *cqewd = (u32 *)cqe; bool send_ersp = false; u32 rsn, rspcnt, xfr_length; if (fod->fcpreq->op == NVMET_FCOP_READDATA_RSP) xfr_length = fod->req.transfer_len; else xfr_length = fod->offset; /* * check to see if we can send a 0's rsp. * Note: to send a 0's response, the NVME-FC host transport will * recreate the CQE. The host transport knows: sq id, SQHD (last * seen in an ersp), and command_id. Thus it will create a * zero-filled CQE with those known fields filled in. Transport * must send an ersp for any condition where the cqe won't match * this. * * Here are the FC-NVME mandated cases where we must send an ersp: * every N responses, where N=ersp_ratio * force fabric commands to send ersp's (not in FC-NVME but good * practice) * normal cmds: any time status is non-zero, or status is zero * but words 0 or 1 are non-zero. * the SQ is 90% or more full * the cmd is a fused command * transferred data length not equal to cmd iu length */ rspcnt = atomic_inc_return(&fod->queue->zrspcnt); if (!(rspcnt % fod->queue->ersp_ratio) || nvme_is_fabrics((struct nvme_command *) sqe) || xfr_length != fod->req.transfer_len || (le16_to_cpu(cqe->status) & 0xFFFE) || cqewd[0] || cqewd[1] || (sqe->flags & (NVME_CMD_FUSE_FIRST | NVME_CMD_FUSE_SECOND)) || queue_90percent_full(fod->queue, le16_to_cpu(cqe->sq_head))) send_ersp = true; /* re-set the fields */ fod->fcpreq->rspaddr = ersp; fod->fcpreq->rspdma = fod->rspdma; if (!send_ersp) { memset(ersp, 0, NVME_FC_SIZEOF_ZEROS_RSP); fod->fcpreq->rsplen = NVME_FC_SIZEOF_ZEROS_RSP; } else { ersp->iu_len = cpu_to_be16(sizeof(*ersp)/sizeof(u32)); rsn = atomic_inc_return(&fod->queue->rsn); ersp->rsn = cpu_to_be32(rsn); ersp->xfrd_len = cpu_to_be32(xfr_length); fod->fcpreq->rsplen = sizeof(*ersp); } fc_dma_sync_single_for_device(tgtport->dev, fod->rspdma, sizeof(fod->rspiubuf), DMA_TO_DEVICE); } static void nvmet_fc_xmt_fcp_op_done(struct nvmefc_tgt_fcp_req *fcpreq); static void nvmet_fc_abort_op(struct nvmet_fc_tgtport *tgtport, struct nvmet_fc_fcp_iod *fod) { struct nvmefc_tgt_fcp_req *fcpreq = fod->fcpreq; /* data no longer needed */ nvmet_fc_free_tgt_pgs(fod); /* * if an ABTS was received or we issued the fcp_abort early * don't call abort routine again. 
*/ /* no need to take lock - lock was taken earlier to get here */ if (!fod->aborted) tgtport->ops->fcp_abort(&tgtport->fc_target_port, fcpreq); nvmet_fc_free_fcp_iod(fod->queue, fod); } static void nvmet_fc_xmt_fcp_rsp(struct nvmet_fc_tgtport *tgtport, struct nvmet_fc_fcp_iod *fod) { int ret; fod->fcpreq->op = NVMET_FCOP_RSP; fod->fcpreq->timeout = 0; nvmet_fc_prep_fcp_rsp(tgtport, fod); ret = tgtport->ops->fcp_op(&tgtport->fc_target_port, fod->fcpreq); if (ret) nvmet_fc_abort_op(tgtport, fod); } static void nvmet_fc_transfer_fcp_data(struct nvmet_fc_tgtport *tgtport, struct nvmet_fc_fcp_iod *fod, u8 op) { struct nvmefc_tgt_fcp_req *fcpreq = fod->fcpreq; struct scatterlist *sg = fod->next_sg; unsigned long flags; u32 remaininglen = fod->req.transfer_len - fod->offset; u32 tlen = 0; int ret; fcpreq->op = op; fcpreq->offset = fod->offset; fcpreq->timeout = NVME_FC_TGTOP_TIMEOUT_SEC; /* * for next sequence: * break at a sg element boundary * attempt to keep sequence length capped at * NVMET_FC_MAX_SEQ_LENGTH but allow sequence to * be longer if a single sg element is larger * than that amount. This is done to avoid creating * a new sg list to use for the tgtport api. */ fcpreq->sg = sg; fcpreq->sg_cnt = 0; while (tlen < remaininglen && fcpreq->sg_cnt < tgtport->max_sg_cnt && tlen + sg_dma_len(sg) < NVMET_FC_MAX_SEQ_LENGTH) { fcpreq->sg_cnt++; tlen += sg_dma_len(sg); sg = sg_next(sg); } if (tlen < remaininglen && fcpreq->sg_cnt == 0) { fcpreq->sg_cnt++; tlen += min_t(u32, sg_dma_len(sg), remaininglen); sg = sg_next(sg); } if (tlen < remaininglen) fod->next_sg = sg; else fod->next_sg = NULL; fcpreq->transfer_length = tlen; fcpreq->transferred_length = 0; fcpreq->fcp_error = 0; fcpreq->rsplen = 0; /* * If the last READDATA request: check if LLDD supports * combined xfr with response. 
*/ if ((op == NVMET_FCOP_READDATA) && ((fod->offset + fcpreq->transfer_length) == fod->req.transfer_len) && (tgtport->ops->target_features & NVMET_FCTGTFEAT_READDATA_RSP)) { fcpreq->op = NVMET_FCOP_READDATA_RSP; nvmet_fc_prep_fcp_rsp(tgtport, fod); } ret = tgtport->ops->fcp_op(&tgtport->fc_target_port, fod->fcpreq); if (ret) { /* * should be ok to set w/o lock as its in the thread of * execution (not an async timer routine) and doesn't * contend with any clearing action */ fod->abort = true; if (op == NVMET_FCOP_WRITEDATA) { spin_lock_irqsave(&fod->flock, flags); fod->writedataactive = false; spin_unlock_irqrestore(&fod->flock, flags); nvmet_req_complete(&fod->req, NVME_SC_INTERNAL); } else /* NVMET_FCOP_READDATA or NVMET_FCOP_READDATA_RSP */ { fcpreq->fcp_error = ret; fcpreq->transferred_length = 0; nvmet_fc_xmt_fcp_op_done(fod->fcpreq); } } } static inline bool __nvmet_fc_fod_op_abort(struct nvmet_fc_fcp_iod *fod, bool abort) { struct nvmefc_tgt_fcp_req *fcpreq = fod->fcpreq; struct nvmet_fc_tgtport *tgtport = fod->tgtport; /* if in the middle of an io and we need to tear down */ if (abort) { if (fcpreq->op == NVMET_FCOP_WRITEDATA) { nvmet_req_complete(&fod->req, NVME_SC_INTERNAL); return true; } nvmet_fc_abort_op(tgtport, fod); return true; } return false; } /* * actual done handler for FCP operations when completed by the lldd */ static void nvmet_fc_fod_op_done(struct nvmet_fc_fcp_iod *fod) { struct nvmefc_tgt_fcp_req *fcpreq = fod->fcpreq; struct nvmet_fc_tgtport *tgtport = fod->tgtport; unsigned long flags; bool abort; spin_lock_irqsave(&fod->flock, flags); abort = fod->abort; fod->writedataactive = false; spin_unlock_irqrestore(&fod->flock, flags); switch (fcpreq->op) { case NVMET_FCOP_WRITEDATA: if (__nvmet_fc_fod_op_abort(fod, abort)) return; if (fcpreq->fcp_error || fcpreq->transferred_length != fcpreq->transfer_length) { spin_lock_irqsave(&fod->flock, flags); fod->abort = true; spin_unlock_irqrestore(&fod->flock, flags); nvmet_req_complete(&fod->req, NVME_SC_INTERNAL); return; } fod->offset += fcpreq->transferred_length; if (fod->offset != fod->req.transfer_len) { spin_lock_irqsave(&fod->flock, flags); fod->writedataactive = true; spin_unlock_irqrestore(&fod->flock, flags); /* transfer the next chunk */ nvmet_fc_transfer_fcp_data(tgtport, fod, NVMET_FCOP_WRITEDATA); return; } /* data transfer complete, resume with nvmet layer */ fod->req.execute(&fod->req); break; case NVMET_FCOP_READDATA: case NVMET_FCOP_READDATA_RSP: if (__nvmet_fc_fod_op_abort(fod, abort)) return; if (fcpreq->fcp_error || fcpreq->transferred_length != fcpreq->transfer_length) { nvmet_fc_abort_op(tgtport, fod); return; } /* success */ if (fcpreq->op == NVMET_FCOP_READDATA_RSP) { /* data no longer needed */ nvmet_fc_free_tgt_pgs(fod); nvmet_fc_free_fcp_iod(fod->queue, fod); return; } fod->offset += fcpreq->transferred_length; if (fod->offset != fod->req.transfer_len) { /* transfer the next chunk */ nvmet_fc_transfer_fcp_data(tgtport, fod, NVMET_FCOP_READDATA); return; } /* data transfer complete, send response */ /* data no longer needed */ nvmet_fc_free_tgt_pgs(fod); nvmet_fc_xmt_fcp_rsp(tgtport, fod); break; case NVMET_FCOP_RSP: if (__nvmet_fc_fod_op_abort(fod, abort)) return; nvmet_fc_free_fcp_iod(fod->queue, fod); break; default: break; } } static void nvmet_fc_xmt_fcp_op_done(struct nvmefc_tgt_fcp_req *fcpreq) { struct nvmet_fc_fcp_iod *fod = fcpreq->nvmet_fc_private; nvmet_fc_fod_op_done(fod); } /* * actual completion handler after execution by the nvmet layer */ static void 
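/*
 * Illustrative sketch, not part of the original driver:
 * queue_90percent_full() earlier in this file estimates submission-queue
 * usage as sqtail - sqhd with wraparound over the circular queue and forces
 * an ersp once usage reaches 90% of (sqsize - 1). The standalone userspace
 * sketch below (hypothetical demo_* names, wrapped in "#if 0" so it is
 * never built as part of this file) reproduces that arithmetic, including
 * the wrapped case.
 */
#if 0
#include <stdio.h>
#include <stdint.h>

static int demo_90percent_full(uint32_t sqhd, uint32_t sqtail, uint32_t sqsize)
{
	uint32_t used;

	sqtail %= sqsize;
	used = (sqtail < sqhd) ? (sqtail + sqsize - sqhd) : (sqtail - sqhd);
	return (used * 10) >= ((sqsize - 1) * 9);
}

int main(void)
{
	/* sqsize 32: threshold is ceil(0.9 * 31) = 28 entries in flight */
	printf("%d\n", demo_90percent_full(0, 27, 32));		/* 0: 27 used */
	printf("%d\n", demo_90percent_full(0, 28, 32));		/* 1: 28 used */
	printf("%d\n", demo_90percent_full(30, 26, 32));	/* 1: wrapped, 28 used */
	return 0;
}
#endif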
__nvmet_fc_fcp_nvme_cmd_done(struct nvmet_fc_tgtport *tgtport, struct nvmet_fc_fcp_iod *fod, int status) { struct nvme_common_command *sqe = &fod->cmdiubuf.sqe.common; struct nvme_completion *cqe = &fod->rspiubuf.cqe; unsigned long flags; bool abort; spin_lock_irqsave(&fod->flock, flags); abort = fod->abort; spin_unlock_irqrestore(&fod->flock, flags); /* if we have a CQE, snoop the last sq_head value */ if (!status) fod->queue->sqhd = cqe->sq_head; if (abort) { nvmet_fc_abort_op(tgtport, fod); return; } /* if an error handling the cmd post initial parsing */ if (status) { /* fudge up a failed CQE status for our transport error */ memset(cqe, 0, sizeof(*cqe)); cqe->sq_head = fod->queue->sqhd; /* echo last cqe sqhd */ cqe->sq_id = cpu_to_le16(fod->queue->qid); cqe->command_id = sqe->command_id; cqe->status = cpu_to_le16(status); } else { /* * try to push the data even if the SQE status is non-zero. * There may be a status where data still was intended to * be moved */ if ((fod->io_dir == NVMET_FCP_READ) && (fod->data_sg_cnt)) { /* push the data over before sending rsp */ nvmet_fc_transfer_fcp_data(tgtport, fod, NVMET_FCOP_READDATA); return; } /* writes & no data - fall thru */ } /* data no longer needed */ nvmet_fc_free_tgt_pgs(fod); nvmet_fc_xmt_fcp_rsp(tgtport, fod); } static void nvmet_fc_fcp_nvme_cmd_done(struct nvmet_req *nvme_req) { struct nvmet_fc_fcp_iod *fod = nvmet_req_to_fod(nvme_req); struct nvmet_fc_tgtport *tgtport = fod->tgtport; __nvmet_fc_fcp_nvme_cmd_done(tgtport, fod, 0); } /* * Actual processing routine for received FC-NVME I/O Requests from the LLD */ static void nvmet_fc_handle_fcp_rqst(struct nvmet_fc_tgtport *tgtport, struct nvmet_fc_fcp_iod *fod) { struct nvme_fc_cmd_iu *cmdiu = &fod->cmdiubuf; u32 xfrlen = be32_to_cpu(cmdiu->data_len); int ret; /* * Fused commands are currently not supported in the linux * implementation. * * As such, the implementation of the FC transport does not * look at the fused commands and order delivery to the upper * layer until we have both based on csn. */ fod->fcpreq->done = nvmet_fc_xmt_fcp_op_done; if (cmdiu->flags & FCNVME_CMD_FLAGS_WRITE) { fod->io_dir = NVMET_FCP_WRITE; if (!nvme_is_write(&cmdiu->sqe)) goto transport_error; } else if (cmdiu->flags & FCNVME_CMD_FLAGS_READ) { fod->io_dir = NVMET_FCP_READ; if (nvme_is_write(&cmdiu->sqe)) goto transport_error; } else { fod->io_dir = NVMET_FCP_NODATA; if (xfrlen) goto transport_error; } fod->req.cmd = &fod->cmdiubuf.sqe; fod->req.cqe = &fod->rspiubuf.cqe; if (tgtport->pe) fod->req.port = tgtport->pe->port; /* clear any response payload */ memset(&fod->rspiubuf, 0, sizeof(fod->rspiubuf)); fod->data_sg = NULL; fod->data_sg_cnt = 0; ret = nvmet_req_init(&fod->req, &fod->queue->nvme_cq, &fod->queue->nvme_sq, &nvmet_fc_tgt_fcp_ops); if (!ret) { /* bad SQE content or invalid ctrl state */ /* nvmet layer has already called op done to send rsp. */ return; } fod->req.transfer_len = xfrlen; /* keep a running counter of tail position */ atomic_inc(&fod->queue->sqtail); if (fod->req.transfer_len) { ret = nvmet_fc_alloc_tgt_pgs(fod); if (ret) { nvmet_req_complete(&fod->req, ret); return; } } fod->req.sg = fod->data_sg; fod->req.sg_cnt = fod->data_sg_cnt; fod->offset = 0; if (fod->io_dir == NVMET_FCP_WRITE) { /* pull the data over before invoking nvmet layer */ nvmet_fc_transfer_fcp_data(tgtport, fod, NVMET_FCOP_WRITEDATA); return; } /* * Reads or no data: * * can invoke the nvmet_layer now. 
If read data, cmd completion will * push the data */ fod->req.execute(&fod->req); return; transport_error: nvmet_fc_abort_op(tgtport, fod); } /** * nvmet_fc_rcv_fcp_req - transport entry point called by an LLDD * upon the reception of a NVME FCP CMD IU. * * Pass a FC-NVME FCP CMD IU received from the FC link to the nvmet-fc * layer for processing. * * The nvmet_fc layer allocates a local job structure (struct * nvmet_fc_fcp_iod) from the queue for the io and copies the * CMD IU buffer to the job structure. As such, on a successful * completion (returns 0), the LLDD may immediately free/reuse * the CMD IU buffer passed in the call. * * However, in some circumstances, due to the packetized nature of FC * and the api of the FC LLDD which may issue a hw command to send the * response, but the LLDD may not get the hw completion for that command * and upcall the nvmet_fc layer before a new command may be * asynchronously received - its possible for a command to be received * before the LLDD and nvmet_fc have recycled the job structure. It gives * the appearance of more commands received than fits in the sq. * To alleviate this scenario, a temporary queue is maintained in the * transport for pending LLDD requests waiting for a queue job structure. * In these "overrun" cases, a temporary queue element is allocated * the LLDD request and CMD iu buffer information remembered, and the * routine returns a -EOVERFLOW status. Subsequently, when a queue job * structure is freed, it is immediately reallocated for anything on the * pending request list. The LLDDs defer_rcv() callback is called, * informing the LLDD that it may reuse the CMD IU buffer, and the io * is then started normally with the transport. * * The LLDD, when receiving an -EOVERFLOW completion status, is to treat * the completion as successful but must not reuse the CMD IU buffer * until the LLDD's defer_rcv() callback has been called for the * corresponding struct nvmefc_tgt_fcp_req pointer. * * If there is any other condition in which an error occurs, the * transport will return a non-zero status indicating the error. * In all cases other than -EOVERFLOW, the transport has not accepted the * request and the LLDD should abort the exchange. * * @target_port: pointer to the (registered) target port the FCP CMD IU * was received on. * @fcpreq: pointer to a fcpreq request structure to be used to reference * the exchange corresponding to the FCP Exchange. * @cmdiubuf: pointer to the buffer containing the FCP CMD IU * @cmdiubuf_len: length, in bytes, of the received FCP CMD IU */ int nvmet_fc_rcv_fcp_req(struct nvmet_fc_target_port *target_port, struct nvmefc_tgt_fcp_req *fcpreq, void *cmdiubuf, u32 cmdiubuf_len) { struct nvmet_fc_tgtport *tgtport = targetport_to_tgtport(target_port); struct nvme_fc_cmd_iu *cmdiu = cmdiubuf; struct nvmet_fc_tgt_queue *queue; struct nvmet_fc_fcp_iod *fod; struct nvmet_fc_defer_fcp_req *deferfcp; unsigned long flags; /* validate iu, so the connection id can be used to find the queue */ if ((cmdiubuf_len != sizeof(*cmdiu)) || (cmdiu->format_id != NVME_CMD_FORMAT_ID) || (cmdiu->fc_id != NVME_CMD_FC_ID) || (be16_to_cpu(cmdiu->iu_len) != (sizeof(*cmdiu)/4))) return -EIO; queue = nvmet_fc_find_target_queue(tgtport, be64_to_cpu(cmdiu->connection_id)); if (!queue) return -ENOTCONN; /* * note: reference taken by find_target_queue * After successful fod allocation, the fod will inherit the * ownership of that reference and will remove the reference * when the fod is freed. 
*/ spin_lock_irqsave(&queue->qlock, flags); fod = nvmet_fc_alloc_fcp_iod(queue); if (fod) { spin_unlock_irqrestore(&queue->qlock, flags); fcpreq->nvmet_fc_private = fod; fod->fcpreq = fcpreq; memcpy(&fod->cmdiubuf, cmdiubuf, cmdiubuf_len); nvmet_fc_queue_fcp_req(tgtport, queue, fcpreq); return 0; } if (!tgtport->ops->defer_rcv) { spin_unlock_irqrestore(&queue->qlock, flags); /* release the queue lookup reference */ nvmet_fc_tgt_q_put(queue); return -ENOENT; } deferfcp = list_first_entry_or_null(&queue->avail_defer_list, struct nvmet_fc_defer_fcp_req, req_list); if (deferfcp) { /* Just re-use one that was previously allocated */ list_del(&deferfcp->req_list); } else { spin_unlock_irqrestore(&queue->qlock, flags); /* Now we need to dynamically allocate one */ deferfcp = kmalloc(sizeof(*deferfcp), GFP_KERNEL); if (!deferfcp) { /* release the queue lookup reference */ nvmet_fc_tgt_q_put(queue); return -ENOMEM; } spin_lock_irqsave(&queue->qlock, flags); } /* For now, use rspaddr / rsplen to save payload information */ fcpreq->rspaddr = cmdiubuf; fcpreq->rsplen = cmdiubuf_len; deferfcp->fcp_req = fcpreq; /* defer processing till a fod becomes available */ list_add_tail(&deferfcp->req_list, &queue->pending_cmd_list); /* NOTE: the queue lookup reference is still valid */ spin_unlock_irqrestore(&queue->qlock, flags); return -EOVERFLOW; } EXPORT_SYMBOL_GPL(nvmet_fc_rcv_fcp_req); /** * nvmet_fc_rcv_fcp_abort - transport entry point called by an LLDD * upon the reception of an ABTS for a FCP command * * Notify the transport that an ABTS has been received for a FCP command * that had been given to the transport via nvmet_fc_rcv_fcp_req(). The * LLDD believes the command is still being worked on * (template_ops->fcp_req_release() has not been called). * * The transport will wait for any outstanding work (an op to the LLDD, * which the lldd should complete with error due to the ABTS; or the * completion from the nvmet layer of the nvme command), then will * stop processing and call the nvmet_fc_rcv_fcp_req() callback to * return the i/o context to the LLDD. The LLDD may send the BA_ACC * to the ABTS either after return from this function (assuming any * outstanding op work has been terminated) or upon the callback being * called. * * @target_port: pointer to the (registered) target port the FCP CMD IU * was received on. * @fcpreq: pointer to the fcpreq request structure that corresponds * to the exchange that received the ABTS. */ void nvmet_fc_rcv_fcp_abort(struct nvmet_fc_target_port *target_port, struct nvmefc_tgt_fcp_req *fcpreq) { struct nvmet_fc_fcp_iod *fod = fcpreq->nvmet_fc_private; struct nvmet_fc_tgt_queue *queue; unsigned long flags; if (!fod || fod->fcpreq != fcpreq) /* job appears to have already completed, ignore abort */ return; queue = fod->queue; spin_lock_irqsave(&queue->qlock, flags); if (fod->active) { /* * mark as abort. The abort handler, invoked upon completion * of any work, will detect the aborted status and do the * callback. */ spin_lock(&fod->flock); fod->abort = true; fod->aborted = true; spin_unlock(&fod->flock); } spin_unlock_irqrestore(&queue->qlock, flags); } EXPORT_SYMBOL_GPL(nvmet_fc_rcv_fcp_abort); struct nvmet_fc_traddr { u64 nn; u64 pn; }; static int __nvme_fc_parse_u64(substring_t *sstr, u64 *val) { u64 token64; if (match_u64(sstr, &token64)) return -EINVAL; *val = token64; return 0; } /* * This routine validates and extracts the WWN's from the TRADDR string. 
* As kernel parsers need the 0x to determine number base, universally * build string to parse with 0x prefix before parsing name strings. */ static int nvme_fc_parse_traddr(struct nvmet_fc_traddr *traddr, char *buf, size_t blen) { char name[2 + NVME_FC_TRADDR_HEXNAMELEN + 1]; substring_t wwn = { name, &name[sizeof(name)-1] }; int nnoffset, pnoffset; /* validate if string is one of the 2 allowed formats */ if (strnlen(buf, blen) == NVME_FC_TRADDR_MAXLENGTH && !strncmp(buf, "nn-0x", NVME_FC_TRADDR_OXNNLEN) && !strncmp(&buf[NVME_FC_TRADDR_MAX_PN_OFFSET], "pn-0x", NVME_FC_TRADDR_OXNNLEN)) { nnoffset = NVME_FC_TRADDR_OXNNLEN; pnoffset = NVME_FC_TRADDR_MAX_PN_OFFSET + NVME_FC_TRADDR_OXNNLEN; } else if ((strnlen(buf, blen) == NVME_FC_TRADDR_MINLENGTH && !strncmp(buf, "nn-", NVME_FC_TRADDR_NNLEN) && !strncmp(&buf[NVME_FC_TRADDR_MIN_PN_OFFSET], "pn-", NVME_FC_TRADDR_NNLEN))) { nnoffset = NVME_FC_TRADDR_NNLEN; pnoffset = NVME_FC_TRADDR_MIN_PN_OFFSET + NVME_FC_TRADDR_NNLEN; } else goto out_einval; name[0] = '0'; name[1] = 'x'; name[2 + NVME_FC_TRADDR_HEXNAMELEN] = 0; memcpy(&name[2], &buf[nnoffset], NVME_FC_TRADDR_HEXNAMELEN); if (__nvme_fc_parse_u64(&wwn, &traddr->nn)) goto out_einval; memcpy(&name[2], &buf[pnoffset], NVME_FC_TRADDR_HEXNAMELEN); if (__nvme_fc_parse_u64(&wwn, &traddr->pn)) goto out_einval; return 0; out_einval: pr_warn("%s: bad traddr string\n", __func__); return -EINVAL; } static int nvmet_fc_add_port(struct nvmet_port *port) { struct nvmet_fc_tgtport *tgtport; struct nvmet_fc_port_entry *pe; struct nvmet_fc_traddr traddr = { 0L, 0L }; unsigned long flags; int ret; /* validate the address info */ if ((port->disc_addr.trtype != NVMF_TRTYPE_FC) || (port->disc_addr.adrfam != NVMF_ADDR_FAMILY_FC)) return -EINVAL; /* map the traddr address info to a target port */ ret = nvme_fc_parse_traddr(&traddr, port->disc_addr.traddr, sizeof(port->disc_addr.traddr)); if (ret) return ret; pe = kzalloc(sizeof(*pe), GFP_KERNEL); if (!pe) return -ENOMEM; ret = -ENXIO; spin_lock_irqsave(&nvmet_fc_tgtlock, flags); list_for_each_entry(tgtport, &nvmet_fc_target_list, tgt_list) { if ((tgtport->fc_target_port.node_name == traddr.nn) && (tgtport->fc_target_port.port_name == traddr.pn)) { /* a FC port can only be 1 nvmet port id */ if (!tgtport->pe) { nvmet_fc_portentry_bind(tgtport, pe, port); ret = 0; } else ret = -EALREADY; break; } } spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags); if (ret) kfree(pe); return ret; } static void nvmet_fc_remove_port(struct nvmet_port *port) { struct nvmet_fc_port_entry *pe = port->priv; nvmet_fc_portentry_unbind(pe); kfree(pe); } static void nvmet_fc_discovery_chg(struct nvmet_port *port) { struct nvmet_fc_port_entry *pe = port->priv; struct nvmet_fc_tgtport *tgtport = pe->tgtport; if (tgtport && tgtport->ops->discovery_event) tgtport->ops->discovery_event(&tgtport->fc_target_port); } static const struct nvmet_fabrics_ops nvmet_fc_tgt_fcp_ops = { .owner = THIS_MODULE, .type = NVMF_TRTYPE_FC, .msdbd = 1, .add_port = nvmet_fc_add_port, .remove_port = nvmet_fc_remove_port, .queue_response = nvmet_fc_fcp_nvme_cmd_done, .delete_ctrl = nvmet_fc_delete_ctrl, .discovery_chg = nvmet_fc_discovery_chg, }; static int __init nvmet_fc_init_module(void) { return nvmet_register_transport(&nvmet_fc_tgt_fcp_ops); } static void __exit nvmet_fc_exit_module(void) { /* sanity check - all lports should be removed */ if (!list_empty(&nvmet_fc_target_list)) pr_warn("%s: targetport list not empty\n", __func__); nvmet_unregister_transport(&nvmet_fc_tgt_fcp_ops); ida_destroy(&nvmet_fc_tgtport_cnt); } 
module_init(nvmet_fc_init_module); module_exit(nvmet_fc_exit_module); MODULE_LICENSE("GPL v2");
linux-master
drivers/nvme/target/fc.c
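A minimal, hypothetical sketch (not part of fc.c above) of how an FC LLDD might consume the nvmet_fc_rcv_fcp_req() entry point and its documented 0 / -EOVERFLOW / other-error contract. Only that function and its return semantics come from the file; every example_* name is invented for illustration, and the header location is assumed from the exported API.

#include <linux/types.h>
#include <linux/errno.h>
#include <linux/nvme-fc-driver.h>	/* assumed home of the nvmet_fc target API */

/* placeholder LLDD hooks, invented purely for this sketch */
static void example_lldd_recycle_cmd_buffer(void *cmdiu) { }
static void example_lldd_abort_exchange(struct nvmefc_tgt_fcp_req *fcpreq) { }

static void example_lldd_deliver_fcp_cmd(struct nvmet_fc_target_port *tport,
					 struct nvmefc_tgt_fcp_req *fcpreq,
					 void *cmdiu, u32 cmdiu_len)
{
	int ret = nvmet_fc_rcv_fcp_req(tport, fcpreq, cmdiu, cmdiu_len);

	if (ret == 0) {
		/* transport copied the CMD IU; the buffer may be reused now */
		example_lldd_recycle_cmd_buffer(cmdiu);
	} else if (ret == -EOVERFLOW) {
		/*
		 * Accepted but deferred: keep the CMD IU buffer untouched
		 * until the LLDD's ->defer_rcv() callback fires for this
		 * fcpreq, then the buffer may be recycled.
		 */
	} else {
		/* not accepted by the transport: terminate the exchange */
		example_lldd_abort_exchange(fcpreq);
	}
}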
// SPDX-License-Identifier: GPL-2.0 /* * NVMe Over Fabrics Target File I/O commands implementation. * Copyright (c) 2017-2018 Western Digital Corporation or its * affiliates. */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/uio.h> #include <linux/falloc.h> #include <linux/file.h> #include <linux/fs.h> #include "nvmet.h" #define NVMET_MIN_MPOOL_OBJ 16 void nvmet_file_ns_revalidate(struct nvmet_ns *ns) { ns->size = i_size_read(ns->file->f_mapping->host); } void nvmet_file_ns_disable(struct nvmet_ns *ns) { if (ns->file) { if (ns->buffered_io) flush_workqueue(buffered_io_wq); mempool_destroy(ns->bvec_pool); ns->bvec_pool = NULL; fput(ns->file); ns->file = NULL; } } int nvmet_file_ns_enable(struct nvmet_ns *ns) { int flags = O_RDWR | O_LARGEFILE; int ret = 0; if (!ns->buffered_io) flags |= O_DIRECT; ns->file = filp_open(ns->device_path, flags, 0); if (IS_ERR(ns->file)) { ret = PTR_ERR(ns->file); pr_err("failed to open file %s: (%d)\n", ns->device_path, ret); ns->file = NULL; return ret; } nvmet_file_ns_revalidate(ns); /* * i_blkbits can be greater than the universally accepted upper bound, * so make sure we export a sane namespace lba_shift. */ ns->blksize_shift = min_t(u8, file_inode(ns->file)->i_blkbits, 12); ns->bvec_pool = mempool_create(NVMET_MIN_MPOOL_OBJ, mempool_alloc_slab, mempool_free_slab, nvmet_bvec_cache); if (!ns->bvec_pool) { ret = -ENOMEM; goto err; } return ret; err: fput(ns->file); ns->file = NULL; ns->size = 0; ns->blksize_shift = 0; return ret; } static ssize_t nvmet_file_submit_bvec(struct nvmet_req *req, loff_t pos, unsigned long nr_segs, size_t count, int ki_flags) { struct kiocb *iocb = &req->f.iocb; ssize_t (*call_iter)(struct kiocb *iocb, struct iov_iter *iter); struct iov_iter iter; int rw; if (req->cmd->rw.opcode == nvme_cmd_write) { if (req->cmd->rw.control & cpu_to_le16(NVME_RW_FUA)) ki_flags |= IOCB_DSYNC; call_iter = req->ns->file->f_op->write_iter; rw = ITER_SOURCE; } else { call_iter = req->ns->file->f_op->read_iter; rw = ITER_DEST; } iov_iter_bvec(&iter, rw, req->f.bvec, nr_segs, count); iocb->ki_pos = pos; iocb->ki_filp = req->ns->file; iocb->ki_flags = ki_flags | iocb->ki_filp->f_iocb_flags; return call_iter(iocb, &iter); } static void nvmet_file_io_done(struct kiocb *iocb, long ret) { struct nvmet_req *req = container_of(iocb, struct nvmet_req, f.iocb); u16 status = NVME_SC_SUCCESS; if (req->f.bvec != req->inline_bvec) { if (likely(req->f.mpool_alloc == false)) kfree(req->f.bvec); else mempool_free(req->f.bvec, req->ns->bvec_pool); } if (unlikely(ret != req->transfer_len)) status = errno_to_nvme_status(req, ret); nvmet_req_complete(req, status); } static bool nvmet_file_execute_io(struct nvmet_req *req, int ki_flags) { ssize_t nr_bvec = req->sg_cnt; unsigned long bv_cnt = 0; bool is_sync = false; size_t len = 0, total_len = 0; ssize_t ret = 0; loff_t pos; int i; struct scatterlist *sg; if (req->f.mpool_alloc && nr_bvec > NVMET_MAX_MPOOL_BVEC) is_sync = true; pos = le64_to_cpu(req->cmd->rw.slba) << req->ns->blksize_shift; if (unlikely(pos + req->transfer_len > req->ns->size)) { nvmet_req_complete(req, errno_to_nvme_status(req, -ENOSPC)); return true; } memset(&req->f.iocb, 0, sizeof(struct kiocb)); for_each_sg(req->sg, sg, req->sg_cnt, i) { bvec_set_page(&req->f.bvec[bv_cnt], sg_page(sg), sg->length, sg->offset); len += req->f.bvec[bv_cnt].bv_len; total_len += req->f.bvec[bv_cnt].bv_len; bv_cnt++; WARN_ON_ONCE((nr_bvec - 1) < 0); if (unlikely(is_sync) && (nr_bvec - 1 == 0 || bv_cnt == NVMET_MAX_MPOOL_BVEC)) { ret = nvmet_file_submit_bvec(req, 
pos, bv_cnt, len, 0); if (ret < 0) goto complete; pos += len; bv_cnt = 0; len = 0; } nr_bvec--; } if (WARN_ON_ONCE(total_len != req->transfer_len)) { ret = -EIO; goto complete; } if (unlikely(is_sync)) { ret = total_len; goto complete; } /* * A NULL ki_complete ask for synchronous execution, which we want * for the IOCB_NOWAIT case. */ if (!(ki_flags & IOCB_NOWAIT)) req->f.iocb.ki_complete = nvmet_file_io_done; ret = nvmet_file_submit_bvec(req, pos, bv_cnt, total_len, ki_flags); switch (ret) { case -EIOCBQUEUED: return true; case -EAGAIN: if (WARN_ON_ONCE(!(ki_flags & IOCB_NOWAIT))) goto complete; return false; case -EOPNOTSUPP: /* * For file systems returning error -EOPNOTSUPP, handle * IOCB_NOWAIT error case separately and retry without * IOCB_NOWAIT. */ if ((ki_flags & IOCB_NOWAIT)) return false; break; } complete: nvmet_file_io_done(&req->f.iocb, ret); return true; } static void nvmet_file_buffered_io_work(struct work_struct *w) { struct nvmet_req *req = container_of(w, struct nvmet_req, f.work); nvmet_file_execute_io(req, 0); } static void nvmet_file_submit_buffered_io(struct nvmet_req *req) { INIT_WORK(&req->f.work, nvmet_file_buffered_io_work); queue_work(buffered_io_wq, &req->f.work); } static void nvmet_file_execute_rw(struct nvmet_req *req) { ssize_t nr_bvec = req->sg_cnt; if (!nvmet_check_transfer_len(req, nvmet_rw_data_len(req))) return; if (!req->sg_cnt || !nr_bvec) { nvmet_req_complete(req, 0); return; } if (nr_bvec > NVMET_MAX_INLINE_BIOVEC) req->f.bvec = kmalloc_array(nr_bvec, sizeof(struct bio_vec), GFP_KERNEL); else req->f.bvec = req->inline_bvec; if (unlikely(!req->f.bvec)) { /* fallback under memory pressure */ req->f.bvec = mempool_alloc(req->ns->bvec_pool, GFP_KERNEL); req->f.mpool_alloc = true; } else req->f.mpool_alloc = false; if (req->ns->buffered_io) { if (likely(!req->f.mpool_alloc) && (req->ns->file->f_mode & FMODE_NOWAIT) && nvmet_file_execute_io(req, IOCB_NOWAIT)) return; nvmet_file_submit_buffered_io(req); } else nvmet_file_execute_io(req, 0); } u16 nvmet_file_flush(struct nvmet_req *req) { return errno_to_nvme_status(req, vfs_fsync(req->ns->file, 1)); } static void nvmet_file_flush_work(struct work_struct *w) { struct nvmet_req *req = container_of(w, struct nvmet_req, f.work); nvmet_req_complete(req, nvmet_file_flush(req)); } static void nvmet_file_execute_flush(struct nvmet_req *req) { if (!nvmet_check_transfer_len(req, 0)) return; INIT_WORK(&req->f.work, nvmet_file_flush_work); queue_work(nvmet_wq, &req->f.work); } static void nvmet_file_execute_discard(struct nvmet_req *req) { int mode = FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE; struct nvme_dsm_range range; loff_t offset, len; u16 status = 0; int ret; int i; for (i = 0; i <= le32_to_cpu(req->cmd->dsm.nr); i++) { status = nvmet_copy_from_sgl(req, i * sizeof(range), &range, sizeof(range)); if (status) break; offset = le64_to_cpu(range.slba) << req->ns->blksize_shift; len = le32_to_cpu(range.nlb); len <<= req->ns->blksize_shift; if (offset + len > req->ns->size) { req->error_slba = le64_to_cpu(range.slba); status = errno_to_nvme_status(req, -ENOSPC); break; } ret = vfs_fallocate(req->ns->file, mode, offset, len); if (ret && ret != -EOPNOTSUPP) { req->error_slba = le64_to_cpu(range.slba); status = errno_to_nvme_status(req, ret); break; } } nvmet_req_complete(req, status); } static void nvmet_file_dsm_work(struct work_struct *w) { struct nvmet_req *req = container_of(w, struct nvmet_req, f.work); switch (le32_to_cpu(req->cmd->dsm.attributes)) { case NVME_DSMGMT_AD: nvmet_file_execute_discard(req); return; 
case NVME_DSMGMT_IDR: case NVME_DSMGMT_IDW: default: /* Not supported yet */ nvmet_req_complete(req, 0); return; } } static void nvmet_file_execute_dsm(struct nvmet_req *req) { if (!nvmet_check_data_len_lte(req, nvmet_dsm_len(req))) return; INIT_WORK(&req->f.work, nvmet_file_dsm_work); queue_work(nvmet_wq, &req->f.work); } static void nvmet_file_write_zeroes_work(struct work_struct *w) { struct nvmet_req *req = container_of(w, struct nvmet_req, f.work); struct nvme_write_zeroes_cmd *write_zeroes = &req->cmd->write_zeroes; int mode = FALLOC_FL_ZERO_RANGE | FALLOC_FL_KEEP_SIZE; loff_t offset; loff_t len; int ret; offset = le64_to_cpu(write_zeroes->slba) << req->ns->blksize_shift; len = (((sector_t)le16_to_cpu(write_zeroes->length) + 1) << req->ns->blksize_shift); if (unlikely(offset + len > req->ns->size)) { nvmet_req_complete(req, errno_to_nvme_status(req, -ENOSPC)); return; } ret = vfs_fallocate(req->ns->file, mode, offset, len); nvmet_req_complete(req, ret < 0 ? errno_to_nvme_status(req, ret) : 0); } static void nvmet_file_execute_write_zeroes(struct nvmet_req *req) { if (!nvmet_check_transfer_len(req, 0)) return; INIT_WORK(&req->f.work, nvmet_file_write_zeroes_work); queue_work(nvmet_wq, &req->f.work); } u16 nvmet_file_parse_io_cmd(struct nvmet_req *req) { switch (req->cmd->common.opcode) { case nvme_cmd_read: case nvme_cmd_write: req->execute = nvmet_file_execute_rw; return 0; case nvme_cmd_flush: req->execute = nvmet_file_execute_flush; return 0; case nvme_cmd_dsm: req->execute = nvmet_file_execute_dsm; return 0; case nvme_cmd_write_zeroes: req->execute = nvmet_file_execute_write_zeroes; return 0; default: return nvmet_report_invalid_opcode(req); } }
linux-master
drivers/nvme/target/io-cmd-file.c
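A small sketch, separate from io-cmd-file.c above, of the allocate-then-fall-back-to-mempool pattern that nvmet_file_execute_rw() uses for its bio_vec array under memory pressure. example_get_bvecs() and its parameters are invented; the pattern and the "remember where it came from so the free path matches" requirement mirror nvmet_file_io_done().

#include <linux/types.h>
#include <linux/bvec.h>
#include <linux/mempool.h>
#include <linux/slab.h>

static struct bio_vec *example_get_bvecs(mempool_t *pool, unsigned int nr,
					 bool *from_pool)
{
	/* fast path: a plain allocation sized for this request */
	struct bio_vec *bv = kmalloc_array(nr, sizeof(*bv), GFP_KERNEL);

	if (bv) {
		*from_pool = false;
		return bv;
	}

	/*
	 * Memory pressure: fall back to the pre-sized mempool (at least
	 * NVMET_MIN_MPOOL_OBJ objects), which guarantees forward progress.
	 * The caller must record *from_pool so the completion path frees
	 * with mempool_free() rather than kfree().
	 */
	*from_pool = true;
	return mempool_alloc(pool, GFP_KERNEL);
}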
// SPDX-License-Identifier: GPL-2.0 /* * NVMe Over Fabrics Target Passthrough command implementation. * * Copyright (c) 2017-2018 Western Digital Corporation or its * affiliates. * Copyright (c) 2019-2020, Eideticom Inc. * */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/module.h> #include "../host/nvme.h" #include "nvmet.h" MODULE_IMPORT_NS(NVME_TARGET_PASSTHRU); /* * xarray to maintain one passthru subsystem per nvme controller. */ static DEFINE_XARRAY(passthru_subsystems); void nvmet_passthrough_override_cap(struct nvmet_ctrl *ctrl) { /* * Multiple command set support can only be declared if the underlying * controller actually supports it. */ if (!nvme_multi_css(ctrl->subsys->passthru_ctrl)) ctrl->cap &= ~(1ULL << 43); } static u16 nvmet_passthru_override_id_descs(struct nvmet_req *req) { struct nvmet_ctrl *ctrl = req->sq->ctrl; u16 status = NVME_SC_SUCCESS; int pos, len; bool csi_seen = false; void *data; u8 csi; if (!ctrl->subsys->clear_ids) return status; data = kzalloc(NVME_IDENTIFY_DATA_SIZE, GFP_KERNEL); if (!data) return NVME_SC_INTERNAL; status = nvmet_copy_from_sgl(req, 0, data, NVME_IDENTIFY_DATA_SIZE); if (status) goto out_free; for (pos = 0; pos < NVME_IDENTIFY_DATA_SIZE; pos += len) { struct nvme_ns_id_desc *cur = data + pos; if (cur->nidl == 0) break; if (cur->nidt == NVME_NIDT_CSI) { memcpy(&csi, cur + 1, NVME_NIDT_CSI_LEN); csi_seen = true; break; } len = sizeof(struct nvme_ns_id_desc) + cur->nidl; } memset(data, 0, NVME_IDENTIFY_DATA_SIZE); if (csi_seen) { struct nvme_ns_id_desc *cur = data; cur->nidt = NVME_NIDT_CSI; cur->nidl = NVME_NIDT_CSI_LEN; memcpy(cur + 1, &csi, NVME_NIDT_CSI_LEN); } status = nvmet_copy_to_sgl(req, 0, data, NVME_IDENTIFY_DATA_SIZE); out_free: kfree(data); return status; } static u16 nvmet_passthru_override_id_ctrl(struct nvmet_req *req) { struct nvmet_ctrl *ctrl = req->sq->ctrl; struct nvme_ctrl *pctrl = ctrl->subsys->passthru_ctrl; u16 status = NVME_SC_SUCCESS; struct nvme_id_ctrl *id; unsigned int max_hw_sectors; int page_shift; id = kzalloc(sizeof(*id), GFP_KERNEL); if (!id) return NVME_SC_INTERNAL; status = nvmet_copy_from_sgl(req, 0, id, sizeof(*id)); if (status) goto out_free; id->cntlid = cpu_to_le16(ctrl->cntlid); id->ver = cpu_to_le32(ctrl->subsys->ver); /* * The passthru NVMe driver may have a limit on the number of segments * which depends on the host's memory fragementation. To solve this, * ensure mdts is limited to the pages equal to the number of segments. */ max_hw_sectors = min_not_zero(pctrl->max_segments << PAGE_SECTORS_SHIFT, pctrl->max_hw_sectors); /* * nvmet_passthru_map_sg is limitted to using a single bio so limit * the mdts based on BIO_MAX_VECS as well */ max_hw_sectors = min_not_zero(BIO_MAX_VECS << PAGE_SECTORS_SHIFT, max_hw_sectors); page_shift = NVME_CAP_MPSMIN(ctrl->cap) + 12; id->mdts = ilog2(max_hw_sectors) + 9 - page_shift; id->acl = 3; /* * We export aerl limit for the fabrics controller, update this when * passthru based aerl support is added. 
*/ id->aerl = NVMET_ASYNC_EVENTS - 1; /* emulate kas as most of the PCIe ctrl don't have a support for kas */ id->kas = cpu_to_le16(NVMET_KAS); /* don't support host memory buffer */ id->hmpre = 0; id->hmmin = 0; id->sqes = min_t(__u8, ((0x6 << 4) | 0x6), id->sqes); id->cqes = min_t(__u8, ((0x4 << 4) | 0x4), id->cqes); id->maxcmd = cpu_to_le16(NVMET_MAX_CMD); /* don't support fuse commands */ id->fuses = 0; id->sgls = cpu_to_le32(1 << 0); /* we always support SGLs */ if (ctrl->ops->flags & NVMF_KEYED_SGLS) id->sgls |= cpu_to_le32(1 << 2); if (req->port->inline_data_size) id->sgls |= cpu_to_le32(1 << 20); /* * When passthru controller is setup using nvme-loop transport it will * export the passthru ctrl subsysnqn (PCIe NVMe ctrl) and will fail in * the nvme/host/core.c in the nvme_init_subsystem()->nvme_active_ctrl() * code path with duplicate ctr subsynqn. In order to prevent that we * mask the passthru-ctrl subsysnqn with the target ctrl subsysnqn. */ memcpy(id->subnqn, ctrl->subsysnqn, sizeof(id->subnqn)); /* use fabric id-ctrl values */ id->ioccsz = cpu_to_le32((sizeof(struct nvme_command) + req->port->inline_data_size) / 16); id->iorcsz = cpu_to_le32(sizeof(struct nvme_completion) / 16); id->msdbd = ctrl->ops->msdbd; /* Support multipath connections with fabrics */ id->cmic |= 1 << 1; /* Disable reservations, see nvmet_parse_passthru_io_cmd() */ id->oncs &= cpu_to_le16(~NVME_CTRL_ONCS_RESERVATIONS); status = nvmet_copy_to_sgl(req, 0, id, sizeof(struct nvme_id_ctrl)); out_free: kfree(id); return status; } static u16 nvmet_passthru_override_id_ns(struct nvmet_req *req) { u16 status = NVME_SC_SUCCESS; struct nvme_id_ns *id; int i; id = kzalloc(sizeof(*id), GFP_KERNEL); if (!id) return NVME_SC_INTERNAL; status = nvmet_copy_from_sgl(req, 0, id, sizeof(struct nvme_id_ns)); if (status) goto out_free; for (i = 0; i < (id->nlbaf + 1); i++) if (id->lbaf[i].ms) memset(&id->lbaf[i], 0, sizeof(id->lbaf[i])); id->flbas = id->flbas & ~(1 << 4); /* * Presently the NVMEof target code does not support sending * metadata, so we must disable it here. This should be updated * once target starts supporting metadata. 
*/ id->mc = 0; if (req->sq->ctrl->subsys->clear_ids) { memset(id->nguid, 0, NVME_NIDT_NGUID_LEN); memset(id->eui64, 0, NVME_NIDT_EUI64_LEN); } status = nvmet_copy_to_sgl(req, 0, id, sizeof(*id)); out_free: kfree(id); return status; } static void nvmet_passthru_execute_cmd_work(struct work_struct *w) { struct nvmet_req *req = container_of(w, struct nvmet_req, p.work); struct request *rq = req->p.rq; struct nvme_ctrl *ctrl = nvme_req(rq)->ctrl; struct nvme_ns *ns = rq->q->queuedata; u32 effects; int status; effects = nvme_passthru_start(ctrl, ns, req->cmd->common.opcode); status = nvme_execute_rq(rq, false); if (status == NVME_SC_SUCCESS && req->cmd->common.opcode == nvme_admin_identify) { switch (req->cmd->identify.cns) { case NVME_ID_CNS_CTRL: nvmet_passthru_override_id_ctrl(req); break; case NVME_ID_CNS_NS: nvmet_passthru_override_id_ns(req); break; case NVME_ID_CNS_NS_DESC_LIST: nvmet_passthru_override_id_descs(req); break; } } else if (status < 0) status = NVME_SC_INTERNAL; req->cqe->result = nvme_req(rq)->result; nvmet_req_complete(req, status); blk_mq_free_request(rq); if (effects) nvme_passthru_end(ctrl, ns, effects, req->cmd, status); } static enum rq_end_io_ret nvmet_passthru_req_done(struct request *rq, blk_status_t blk_status) { struct nvmet_req *req = rq->end_io_data; req->cqe->result = nvme_req(rq)->result; nvmet_req_complete(req, nvme_req(rq)->status); blk_mq_free_request(rq); return RQ_END_IO_NONE; } static int nvmet_passthru_map_sg(struct nvmet_req *req, struct request *rq) { struct scatterlist *sg; struct bio *bio; int i; if (req->sg_cnt > BIO_MAX_VECS) return -EINVAL; if (nvmet_use_inline_bvec(req)) { bio = &req->p.inline_bio; bio_init(bio, NULL, req->inline_bvec, ARRAY_SIZE(req->inline_bvec), req_op(rq)); } else { bio = bio_alloc(NULL, bio_max_segs(req->sg_cnt), req_op(rq), GFP_KERNEL); bio->bi_end_io = bio_put; } for_each_sg(req->sg, sg, req->sg_cnt, i) { if (bio_add_pc_page(rq->q, bio, sg_page(sg), sg->length, sg->offset) < sg->length) { nvmet_req_bio_put(req, bio); return -EINVAL; } } blk_rq_bio_prep(rq, bio, req->sg_cnt); return 0; } static void nvmet_passthru_execute_cmd(struct nvmet_req *req) { struct nvme_ctrl *ctrl = nvmet_req_subsys(req)->passthru_ctrl; struct request_queue *q = ctrl->admin_q; struct nvme_ns *ns = NULL; struct request *rq = NULL; unsigned int timeout; u32 effects; u16 status; int ret; if (likely(req->sq->qid != 0)) { u32 nsid = le32_to_cpu(req->cmd->common.nsid); ns = nvme_find_get_ns(ctrl, nsid); if (unlikely(!ns)) { pr_err("failed to get passthru ns nsid:%u\n", nsid); status = NVME_SC_INVALID_NS | NVME_SC_DNR; goto out; } q = ns->queue; timeout = nvmet_req_subsys(req)->io_timeout; } else { timeout = nvmet_req_subsys(req)->admin_timeout; } rq = blk_mq_alloc_request(q, nvme_req_op(req->cmd), 0); if (IS_ERR(rq)) { status = NVME_SC_INTERNAL; goto out_put_ns; } nvme_init_request(rq, req->cmd); if (timeout) rq->timeout = timeout; if (req->sg_cnt) { ret = nvmet_passthru_map_sg(req, rq); if (unlikely(ret)) { status = NVME_SC_INTERNAL; goto out_put_req; } } /* * If a command needs post-execution fixups, or there are any * non-trivial effects, make sure to execute the command synchronously * in a workqueue so that nvme_passthru_end gets called. 
*/ effects = nvme_command_effects(ctrl, ns, req->cmd->common.opcode); if (req->p.use_workqueue || (effects & ~(NVME_CMD_EFFECTS_CSUPP | NVME_CMD_EFFECTS_LBCC))) { INIT_WORK(&req->p.work, nvmet_passthru_execute_cmd_work); req->p.rq = rq; queue_work(nvmet_wq, &req->p.work); } else { rq->end_io = nvmet_passthru_req_done; rq->end_io_data = req; blk_execute_rq_nowait(rq, false); } if (ns) nvme_put_ns(ns); return; out_put_req: blk_mq_free_request(rq); out_put_ns: if (ns) nvme_put_ns(ns); out: nvmet_req_complete(req, status); } /* * We need to emulate set host behaviour to ensure that any requested * behaviour of the target's host matches the requested behaviour * of the device's host and fail otherwise. */ static void nvmet_passthru_set_host_behaviour(struct nvmet_req *req) { struct nvme_ctrl *ctrl = nvmet_req_subsys(req)->passthru_ctrl; struct nvme_feat_host_behavior *host; u16 status = NVME_SC_INTERNAL; int ret; host = kzalloc(sizeof(*host) * 2, GFP_KERNEL); if (!host) goto out_complete_req; ret = nvme_get_features(ctrl, NVME_FEAT_HOST_BEHAVIOR, 0, host, sizeof(*host), NULL); if (ret) goto out_free_host; status = nvmet_copy_from_sgl(req, 0, &host[1], sizeof(*host)); if (status) goto out_free_host; if (memcmp(&host[0], &host[1], sizeof(host[0]))) { pr_warn("target host has requested different behaviour from the local host\n"); status = NVME_SC_INTERNAL; } out_free_host: kfree(host); out_complete_req: nvmet_req_complete(req, status); } static u16 nvmet_setup_passthru_command(struct nvmet_req *req) { req->p.use_workqueue = false; req->execute = nvmet_passthru_execute_cmd; return NVME_SC_SUCCESS; } u16 nvmet_parse_passthru_io_cmd(struct nvmet_req *req) { /* Reject any commands with non-sgl flags set (ie. fused commands) */ if (req->cmd->common.flags & ~NVME_CMD_SGL_ALL) return NVME_SC_INVALID_FIELD; switch (req->cmd->common.opcode) { case nvme_cmd_resv_register: case nvme_cmd_resv_report: case nvme_cmd_resv_acquire: case nvme_cmd_resv_release: /* * Reservations cannot be supported properly because the * underlying device has no way of differentiating different * hosts that connect via fabrics. This could potentially be * emulated in the future if regular targets grow support for * this feature. */ return NVME_SC_INVALID_OPCODE | NVME_SC_DNR; } return nvmet_setup_passthru_command(req); } /* * Only features that are emulated or specifically allowed in the list are * passed down to the controller. This function implements the allow list for * both get and set features. */ static u16 nvmet_passthru_get_set_features(struct nvmet_req *req) { switch (le32_to_cpu(req->cmd->features.fid)) { case NVME_FEAT_ARBITRATION: case NVME_FEAT_POWER_MGMT: case NVME_FEAT_LBA_RANGE: case NVME_FEAT_TEMP_THRESH: case NVME_FEAT_ERR_RECOVERY: case NVME_FEAT_VOLATILE_WC: case NVME_FEAT_WRITE_ATOMIC: case NVME_FEAT_AUTO_PST: case NVME_FEAT_TIMESTAMP: case NVME_FEAT_HCTM: case NVME_FEAT_NOPSC: case NVME_FEAT_RRL: case NVME_FEAT_PLM_CONFIG: case NVME_FEAT_PLM_WINDOW: case NVME_FEAT_HOST_BEHAVIOR: case NVME_FEAT_SANITIZE: case NVME_FEAT_VENDOR_START ... 
NVME_FEAT_VENDOR_END: return nvmet_setup_passthru_command(req); case NVME_FEAT_ASYNC_EVENT: /* There is no support for forwarding ASYNC events */ case NVME_FEAT_IRQ_COALESCE: case NVME_FEAT_IRQ_CONFIG: /* The IRQ settings will not apply to the target controller */ case NVME_FEAT_HOST_MEM_BUF: /* * Any HMB that's set will not be passed through and will * not work as expected */ case NVME_FEAT_SW_PROGRESS: /* * The Pre-Boot Software Load Count doesn't make much * sense for a target to export */ case NVME_FEAT_RESV_MASK: case NVME_FEAT_RESV_PERSIST: /* No reservations, see nvmet_parse_passthru_io_cmd() */ default: return NVME_SC_INVALID_OPCODE | NVME_SC_DNR; } } u16 nvmet_parse_passthru_admin_cmd(struct nvmet_req *req) { /* Reject any commands with non-sgl flags set (ie. fused commands) */ if (req->cmd->common.flags & ~NVME_CMD_SGL_ALL) return NVME_SC_INVALID_FIELD; /* * Passthru all vendor specific commands */ if (req->cmd->common.opcode >= nvme_admin_vendor_start) return nvmet_setup_passthru_command(req); switch (req->cmd->common.opcode) { case nvme_admin_async_event: req->execute = nvmet_execute_async_event; return NVME_SC_SUCCESS; case nvme_admin_keep_alive: /* * Most PCIe ctrls don't support keep alive cmd, we route keep * alive to the non-passthru mode. In future please change this * code when PCIe ctrls with keep alive support available. */ req->execute = nvmet_execute_keep_alive; return NVME_SC_SUCCESS; case nvme_admin_set_features: switch (le32_to_cpu(req->cmd->features.fid)) { case NVME_FEAT_ASYNC_EVENT: case NVME_FEAT_KATO: case NVME_FEAT_NUM_QUEUES: case NVME_FEAT_HOST_ID: req->execute = nvmet_execute_set_features; return NVME_SC_SUCCESS; case NVME_FEAT_HOST_BEHAVIOR: req->execute = nvmet_passthru_set_host_behaviour; return NVME_SC_SUCCESS; default: return nvmet_passthru_get_set_features(req); } break; case nvme_admin_get_features: switch (le32_to_cpu(req->cmd->features.fid)) { case NVME_FEAT_ASYNC_EVENT: case NVME_FEAT_KATO: case NVME_FEAT_NUM_QUEUES: case NVME_FEAT_HOST_ID: req->execute = nvmet_execute_get_features; return NVME_SC_SUCCESS; default: return nvmet_passthru_get_set_features(req); } break; case nvme_admin_identify: switch (req->cmd->identify.cns) { case NVME_ID_CNS_CTRL: req->execute = nvmet_passthru_execute_cmd; req->p.use_workqueue = true; return NVME_SC_SUCCESS; case NVME_ID_CNS_CS_CTRL: switch (req->cmd->identify.csi) { case NVME_CSI_ZNS: req->execute = nvmet_passthru_execute_cmd; req->p.use_workqueue = true; return NVME_SC_SUCCESS; } return NVME_SC_INVALID_OPCODE | NVME_SC_DNR; case NVME_ID_CNS_NS: req->execute = nvmet_passthru_execute_cmd; req->p.use_workqueue = true; return NVME_SC_SUCCESS; case NVME_ID_CNS_CS_NS: switch (req->cmd->identify.csi) { case NVME_CSI_ZNS: req->execute = nvmet_passthru_execute_cmd; req->p.use_workqueue = true; return NVME_SC_SUCCESS; } return NVME_SC_INVALID_OPCODE | NVME_SC_DNR; default: return nvmet_setup_passthru_command(req); } case nvme_admin_get_log_page: return nvmet_setup_passthru_command(req); default: /* Reject commands not in the allowlist above */ return nvmet_report_invalid_opcode(req); } } int nvmet_passthru_ctrl_enable(struct nvmet_subsys *subsys) { struct nvme_ctrl *ctrl; struct file *file; int ret = -EINVAL; void *old; mutex_lock(&subsys->lock); if (!subsys->passthru_ctrl_path) goto out_unlock; if (subsys->passthru_ctrl) goto out_unlock; if (subsys->nr_namespaces) { pr_info("cannot enable both passthru and regular namespaces for a single subsystem"); goto out_unlock; } file = filp_open(subsys->passthru_ctrl_path, 
O_RDWR, 0); if (IS_ERR(file)) { ret = PTR_ERR(file); goto out_unlock; } ctrl = nvme_ctrl_from_file(file); if (!ctrl) { pr_err("failed to open nvme controller %s\n", subsys->passthru_ctrl_path); goto out_put_file; } old = xa_cmpxchg(&passthru_subsystems, ctrl->cntlid, NULL, subsys, GFP_KERNEL); if (xa_is_err(old)) { ret = xa_err(old); goto out_put_file; } if (old) goto out_put_file; subsys->passthru_ctrl = ctrl; subsys->ver = ctrl->vs; if (subsys->ver < NVME_VS(1, 2, 1)) { pr_warn("nvme controller version is too old: %llu.%llu.%llu, advertising 1.2.1\n", NVME_MAJOR(subsys->ver), NVME_MINOR(subsys->ver), NVME_TERTIARY(subsys->ver)); subsys->ver = NVME_VS(1, 2, 1); } nvme_get_ctrl(ctrl); __module_get(subsys->passthru_ctrl->ops->module); ret = 0; out_put_file: filp_close(file, NULL); out_unlock: mutex_unlock(&subsys->lock); return ret; } static void __nvmet_passthru_ctrl_disable(struct nvmet_subsys *subsys) { if (subsys->passthru_ctrl) { xa_erase(&passthru_subsystems, subsys->passthru_ctrl->cntlid); module_put(subsys->passthru_ctrl->ops->module); nvme_put_ctrl(subsys->passthru_ctrl); } subsys->passthru_ctrl = NULL; subsys->ver = NVMET_DEFAULT_VS; } void nvmet_passthru_ctrl_disable(struct nvmet_subsys *subsys) { mutex_lock(&subsys->lock); __nvmet_passthru_ctrl_disable(subsys); mutex_unlock(&subsys->lock); } void nvmet_passthru_subsys_free(struct nvmet_subsys *subsys) { mutex_lock(&subsys->lock); __nvmet_passthru_ctrl_disable(subsys); mutex_unlock(&subsys->lock); kfree(subsys->passthru_ctrl_path); }
linux-master
drivers/nvme/target/passthru.c
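A worked sketch, not part of passthru.c above, of the MDTS arithmetic in nvmet_passthru_override_id_ctrl(): MDTS is reported as a power of two in units of the controller's minimum page size, so the code converts a sector limit into that exponent. example_mdts_from_sectors() and the sample values are assumptions made for illustration.

#include <linux/types.h>
#include <linux/log2.h>

static u8 example_mdts_from_sectors(unsigned int max_hw_sectors,
				    unsigned int mpsmin)
{
	unsigned int page_shift = 12 + mpsmin;	/* CAP.MPSMIN counts 4 KiB units */

	/*
	 * max bytes = max_hw_sectors << 9 = 1 << (mdts + page_shift), so
	 * mdts = ilog2(max_hw_sectors) + 9 - page_shift.
	 *
	 * e.g. max_hw_sectors = 2048 (1 MiB) with mpsmin = 0:
	 *   ilog2(2048) + 9 - 12 = 8, i.e. 256 pages of 4 KiB = 1 MiB.
	 */
	return ilog2(max_hw_sectors) + 9 - page_shift;
}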
// SPDX-License-Identifier: GPL-2.0 /* * NVMe I/O command implementation. * Copyright (c) 2015-2016 HGST, a Western Digital Company. */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/blkdev.h> #include <linux/blk-integrity.h> #include <linux/memremap.h> #include <linux/module.h> #include "nvmet.h" void nvmet_bdev_set_limits(struct block_device *bdev, struct nvme_id_ns *id) { /* Logical blocks per physical block, 0's based. */ const __le16 lpp0b = to0based(bdev_physical_block_size(bdev) / bdev_logical_block_size(bdev)); /* * For NVMe 1.2 and later, bit 1 indicates that the fields NAWUN, * NAWUPF, and NACWU are defined for this namespace and should be * used by the host for this namespace instead of the AWUN, AWUPF, * and ACWU fields in the Identify Controller data structure. If * any of these fields are zero that means that the corresponding * field from the identify controller data structure should be used. */ id->nsfeat |= 1 << 1; id->nawun = lpp0b; id->nawupf = lpp0b; id->nacwu = lpp0b; /* * Bit 4 indicates that the fields NPWG, NPWA, NPDG, NPDA, and * NOWS are defined for this namespace and should be used by * the host for I/O optimization. */ id->nsfeat |= 1 << 4; /* NPWG = Namespace Preferred Write Granularity. 0's based */ id->npwg = lpp0b; /* NPWA = Namespace Preferred Write Alignment. 0's based */ id->npwa = id->npwg; /* NPDG = Namespace Preferred Deallocate Granularity. 0's based */ id->npdg = to0based(bdev_discard_granularity(bdev) / bdev_logical_block_size(bdev)); /* NPDG = Namespace Preferred Deallocate Alignment */ id->npda = id->npdg; /* NOWS = Namespace Optimal Write Size */ id->nows = to0based(bdev_io_opt(bdev) / bdev_logical_block_size(bdev)); } void nvmet_bdev_ns_disable(struct nvmet_ns *ns) { if (ns->bdev) { blkdev_put(ns->bdev, NULL); ns->bdev = NULL; } } static void nvmet_bdev_ns_enable_integrity(struct nvmet_ns *ns) { struct blk_integrity *bi = bdev_get_integrity(ns->bdev); if (bi) { ns->metadata_size = bi->tuple_size; if (bi->profile == &t10_pi_type1_crc) ns->pi_type = NVME_NS_DPS_PI_TYPE1; else if (bi->profile == &t10_pi_type3_crc) ns->pi_type = NVME_NS_DPS_PI_TYPE3; else /* Unsupported metadata type */ ns->metadata_size = 0; } } int nvmet_bdev_ns_enable(struct nvmet_ns *ns) { int ret; /* * When buffered_io namespace attribute is enabled that means user want * this block device to be used as a file, so block device can take * an advantage of cache. */ if (ns->buffered_io) return -ENOTBLK; ns->bdev = blkdev_get_by_path(ns->device_path, BLK_OPEN_READ | BLK_OPEN_WRITE, NULL, NULL); if (IS_ERR(ns->bdev)) { ret = PTR_ERR(ns->bdev); if (ret != -ENOTBLK) { pr_err("failed to open block device %s: (%ld)\n", ns->device_path, PTR_ERR(ns->bdev)); } ns->bdev = NULL; return ret; } ns->size = bdev_nr_bytes(ns->bdev); ns->blksize_shift = blksize_bits(bdev_logical_block_size(ns->bdev)); ns->pi_type = 0; ns->metadata_size = 0; if (IS_ENABLED(CONFIG_BLK_DEV_INTEGRITY_T10)) nvmet_bdev_ns_enable_integrity(ns); if (bdev_is_zoned(ns->bdev)) { if (!nvmet_bdev_zns_enable(ns)) { nvmet_bdev_ns_disable(ns); return -EINVAL; } ns->csi = NVME_CSI_ZNS; } return 0; } void nvmet_bdev_ns_revalidate(struct nvmet_ns *ns) { ns->size = bdev_nr_bytes(ns->bdev); } u16 blk_to_nvme_status(struct nvmet_req *req, blk_status_t blk_sts) { u16 status = NVME_SC_SUCCESS; if (likely(blk_sts == BLK_STS_OK)) return status; /* * Right now there exists M : 1 mapping between block layer error * to the NVMe status code (see nvme_error_status()). 
For consistency, * when we reverse map we use most appropriate NVMe Status code from * the group of the NVMe staus codes used in the nvme_error_status(). */ switch (blk_sts) { case BLK_STS_NOSPC: status = NVME_SC_CAP_EXCEEDED | NVME_SC_DNR; req->error_loc = offsetof(struct nvme_rw_command, length); break; case BLK_STS_TARGET: status = NVME_SC_LBA_RANGE | NVME_SC_DNR; req->error_loc = offsetof(struct nvme_rw_command, slba); break; case BLK_STS_NOTSUPP: req->error_loc = offsetof(struct nvme_common_command, opcode); switch (req->cmd->common.opcode) { case nvme_cmd_dsm: case nvme_cmd_write_zeroes: status = NVME_SC_ONCS_NOT_SUPPORTED | NVME_SC_DNR; break; default: status = NVME_SC_INVALID_OPCODE | NVME_SC_DNR; } break; case BLK_STS_MEDIUM: status = NVME_SC_ACCESS_DENIED; req->error_loc = offsetof(struct nvme_rw_command, nsid); break; case BLK_STS_IOERR: default: status = NVME_SC_INTERNAL | NVME_SC_DNR; req->error_loc = offsetof(struct nvme_common_command, opcode); } switch (req->cmd->common.opcode) { case nvme_cmd_read: case nvme_cmd_write: req->error_slba = le64_to_cpu(req->cmd->rw.slba); break; case nvme_cmd_write_zeroes: req->error_slba = le64_to_cpu(req->cmd->write_zeroes.slba); break; default: req->error_slba = 0; } return status; } static void nvmet_bio_done(struct bio *bio) { struct nvmet_req *req = bio->bi_private; nvmet_req_complete(req, blk_to_nvme_status(req, bio->bi_status)); nvmet_req_bio_put(req, bio); } #ifdef CONFIG_BLK_DEV_INTEGRITY static int nvmet_bdev_alloc_bip(struct nvmet_req *req, struct bio *bio, struct sg_mapping_iter *miter) { struct blk_integrity *bi; struct bio_integrity_payload *bip; int rc; size_t resid, len; bi = bdev_get_integrity(req->ns->bdev); if (unlikely(!bi)) { pr_err("Unable to locate bio_integrity\n"); return -ENODEV; } bip = bio_integrity_alloc(bio, GFP_NOIO, bio_max_segs(req->metadata_sg_cnt)); if (IS_ERR(bip)) { pr_err("Unable to allocate bio_integrity_payload\n"); return PTR_ERR(bip); } /* virtual start sector must be in integrity interval units */ bip_set_seed(bip, bio->bi_iter.bi_sector >> (bi->interval_exp - SECTOR_SHIFT)); resid = bio_integrity_bytes(bi, bio_sectors(bio)); while (resid > 0 && sg_miter_next(miter)) { len = min_t(size_t, miter->length, resid); rc = bio_integrity_add_page(bio, miter->page, len, offset_in_page(miter->addr)); if (unlikely(rc != len)) { pr_err("bio_integrity_add_page() failed; %d\n", rc); sg_miter_stop(miter); return -ENOMEM; } resid -= len; if (len < miter->length) miter->consumed -= miter->length - len; } sg_miter_stop(miter); return 0; } #else static int nvmet_bdev_alloc_bip(struct nvmet_req *req, struct bio *bio, struct sg_mapping_iter *miter) { return -EINVAL; } #endif /* CONFIG_BLK_DEV_INTEGRITY */ static void nvmet_bdev_execute_rw(struct nvmet_req *req) { unsigned int sg_cnt = req->sg_cnt; struct bio *bio; struct scatterlist *sg; struct blk_plug plug; sector_t sector; blk_opf_t opf; int i, rc; struct sg_mapping_iter prot_miter; unsigned int iter_flags; unsigned int total_len = nvmet_rw_data_len(req) + req->metadata_len; if (!nvmet_check_transfer_len(req, total_len)) return; if (!req->sg_cnt) { nvmet_req_complete(req, 0); return; } if (req->cmd->rw.opcode == nvme_cmd_write) { opf = REQ_OP_WRITE | REQ_SYNC | REQ_IDLE; if (req->cmd->rw.control & cpu_to_le16(NVME_RW_FUA)) opf |= REQ_FUA; iter_flags = SG_MITER_TO_SG; } else { opf = REQ_OP_READ; iter_flags = SG_MITER_FROM_SG; } if (is_pci_p2pdma_page(sg_page(req->sg))) opf |= REQ_NOMERGE; sector = nvmet_lba_to_sect(req->ns, req->cmd->rw.slba); if 
(nvmet_use_inline_bvec(req)) { bio = &req->b.inline_bio; bio_init(bio, req->ns->bdev, req->inline_bvec, ARRAY_SIZE(req->inline_bvec), opf); } else { bio = bio_alloc(req->ns->bdev, bio_max_segs(sg_cnt), opf, GFP_KERNEL); } bio->bi_iter.bi_sector = sector; bio->bi_private = req; bio->bi_end_io = nvmet_bio_done; blk_start_plug(&plug); if (req->metadata_len) sg_miter_start(&prot_miter, req->metadata_sg, req->metadata_sg_cnt, iter_flags); for_each_sg(req->sg, sg, req->sg_cnt, i) { while (bio_add_page(bio, sg_page(sg), sg->length, sg->offset) != sg->length) { struct bio *prev = bio; if (req->metadata_len) { rc = nvmet_bdev_alloc_bip(req, bio, &prot_miter); if (unlikely(rc)) { bio_io_error(bio); return; } } bio = bio_alloc(req->ns->bdev, bio_max_segs(sg_cnt), opf, GFP_KERNEL); bio->bi_iter.bi_sector = sector; bio_chain(bio, prev); submit_bio(prev); } sector += sg->length >> 9; sg_cnt--; } if (req->metadata_len) { rc = nvmet_bdev_alloc_bip(req, bio, &prot_miter); if (unlikely(rc)) { bio_io_error(bio); return; } } submit_bio(bio); blk_finish_plug(&plug); } static void nvmet_bdev_execute_flush(struct nvmet_req *req) { struct bio *bio = &req->b.inline_bio; if (!bdev_write_cache(req->ns->bdev)) { nvmet_req_complete(req, NVME_SC_SUCCESS); return; } if (!nvmet_check_transfer_len(req, 0)) return; bio_init(bio, req->ns->bdev, req->inline_bvec, ARRAY_SIZE(req->inline_bvec), REQ_OP_WRITE | REQ_PREFLUSH); bio->bi_private = req; bio->bi_end_io = nvmet_bio_done; submit_bio(bio); } u16 nvmet_bdev_flush(struct nvmet_req *req) { if (!bdev_write_cache(req->ns->bdev)) return 0; if (blkdev_issue_flush(req->ns->bdev)) return NVME_SC_INTERNAL | NVME_SC_DNR; return 0; } static u16 nvmet_bdev_discard_range(struct nvmet_req *req, struct nvme_dsm_range *range, struct bio **bio) { struct nvmet_ns *ns = req->ns; int ret; ret = __blkdev_issue_discard(ns->bdev, nvmet_lba_to_sect(ns, range->slba), le32_to_cpu(range->nlb) << (ns->blksize_shift - 9), GFP_KERNEL, bio); if (ret && ret != -EOPNOTSUPP) { req->error_slba = le64_to_cpu(range->slba); return errno_to_nvme_status(req, ret); } return NVME_SC_SUCCESS; } static void nvmet_bdev_execute_discard(struct nvmet_req *req) { struct nvme_dsm_range range; struct bio *bio = NULL; int i; u16 status; for (i = 0; i <= le32_to_cpu(req->cmd->dsm.nr); i++) { status = nvmet_copy_from_sgl(req, i * sizeof(range), &range, sizeof(range)); if (status) break; status = nvmet_bdev_discard_range(req, &range, &bio); if (status) break; } if (bio) { bio->bi_private = req; bio->bi_end_io = nvmet_bio_done; if (status) bio_io_error(bio); else submit_bio(bio); } else { nvmet_req_complete(req, status); } } static void nvmet_bdev_execute_dsm(struct nvmet_req *req) { if (!nvmet_check_data_len_lte(req, nvmet_dsm_len(req))) return; switch (le32_to_cpu(req->cmd->dsm.attributes)) { case NVME_DSMGMT_AD: nvmet_bdev_execute_discard(req); return; case NVME_DSMGMT_IDR: case NVME_DSMGMT_IDW: default: /* Not supported yet */ nvmet_req_complete(req, 0); return; } } static void nvmet_bdev_execute_write_zeroes(struct nvmet_req *req) { struct nvme_write_zeroes_cmd *write_zeroes = &req->cmd->write_zeroes; struct bio *bio = NULL; sector_t sector; sector_t nr_sector; int ret; if (!nvmet_check_transfer_len(req, 0)) return; sector = nvmet_lba_to_sect(req->ns, write_zeroes->slba); nr_sector = (((sector_t)le16_to_cpu(write_zeroes->length) + 1) << (req->ns->blksize_shift - 9)); ret = __blkdev_issue_zeroout(req->ns->bdev, sector, nr_sector, GFP_KERNEL, &bio, 0); if (bio) { bio->bi_private = req; bio->bi_end_io = nvmet_bio_done; 
submit_bio(bio); } else { nvmet_req_complete(req, errno_to_nvme_status(req, ret)); } } u16 nvmet_bdev_parse_io_cmd(struct nvmet_req *req) { switch (req->cmd->common.opcode) { case nvme_cmd_read: case nvme_cmd_write: req->execute = nvmet_bdev_execute_rw; if (req->sq->ctrl->pi_support && nvmet_ns_has_pi(req->ns)) req->metadata_len = nvmet_rw_metadata_len(req); return 0; case nvme_cmd_flush: req->execute = nvmet_bdev_execute_flush; return 0; case nvme_cmd_dsm: req->execute = nvmet_bdev_execute_dsm; return 0; case nvme_cmd_write_zeroes: req->execute = nvmet_bdev_execute_write_zeroes; return 0; default: return nvmet_report_invalid_opcode(req); } }
linux-master
drivers/nvme/target/io-cmd-bdev.c
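A short sketch, separate from io-cmd-bdev.c above, of the "0's based" encoding that nvmet_bdev_set_limits() uses for NAWUN/NAWUPF/NACWU/NPWG and related Identify Namespace fields: each field stores one less than the number of logical blocks. example_zero_based() and the sample device geometry are assumptions for illustration.

/* the Identify fields carry "number of units minus one" */
static inline unsigned int example_zero_based(unsigned int nunits)
{
	return nunits ? nunits - 1 : 0;
}

/*
 * e.g. a 4096-byte physical / 512-byte logical block device:
 *   4096 / 512 = 8 logical blocks per physical block, reported as
 *   example_zero_based(8) == 7, telling hosts that writes aligned to and
 *   sized in multiples of 8 LBAs avoid read-modify-write in the device.
 */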
// SPDX-License-Identifier: GPL-2.0 /* * Copyright (c) 2020 Hannes Reinecke, SUSE Linux */ #include <linux/module.h> #include <linux/crc32.h> #include <linux/base64.h> #include <linux/prandom.h> #include <linux/scatterlist.h> #include <asm/unaligned.h> #include <crypto/hash.h> #include <crypto/dh.h> #include <linux/nvme.h> #include <linux/nvme-auth.h> static u32 nvme_dhchap_seqnum; static DEFINE_MUTEX(nvme_dhchap_mutex); u32 nvme_auth_get_seqnum(void) { u32 seqnum; mutex_lock(&nvme_dhchap_mutex); if (!nvme_dhchap_seqnum) nvme_dhchap_seqnum = get_random_u32(); else { nvme_dhchap_seqnum++; if (!nvme_dhchap_seqnum) nvme_dhchap_seqnum++; } seqnum = nvme_dhchap_seqnum; mutex_unlock(&nvme_dhchap_mutex); return seqnum; } EXPORT_SYMBOL_GPL(nvme_auth_get_seqnum); static struct nvme_auth_dhgroup_map { const char name[16]; const char kpp[16]; } dhgroup_map[] = { [NVME_AUTH_DHGROUP_NULL] = { .name = "null", .kpp = "null" }, [NVME_AUTH_DHGROUP_2048] = { .name = "ffdhe2048", .kpp = "ffdhe2048(dh)" }, [NVME_AUTH_DHGROUP_3072] = { .name = "ffdhe3072", .kpp = "ffdhe3072(dh)" }, [NVME_AUTH_DHGROUP_4096] = { .name = "ffdhe4096", .kpp = "ffdhe4096(dh)" }, [NVME_AUTH_DHGROUP_6144] = { .name = "ffdhe6144", .kpp = "ffdhe6144(dh)" }, [NVME_AUTH_DHGROUP_8192] = { .name = "ffdhe8192", .kpp = "ffdhe8192(dh)" }, }; const char *nvme_auth_dhgroup_name(u8 dhgroup_id) { if (dhgroup_id >= ARRAY_SIZE(dhgroup_map)) return NULL; return dhgroup_map[dhgroup_id].name; } EXPORT_SYMBOL_GPL(nvme_auth_dhgroup_name); const char *nvme_auth_dhgroup_kpp(u8 dhgroup_id) { if (dhgroup_id >= ARRAY_SIZE(dhgroup_map)) return NULL; return dhgroup_map[dhgroup_id].kpp; } EXPORT_SYMBOL_GPL(nvme_auth_dhgroup_kpp); u8 nvme_auth_dhgroup_id(const char *dhgroup_name) { int i; if (!dhgroup_name || !strlen(dhgroup_name)) return NVME_AUTH_DHGROUP_INVALID; for (i = 0; i < ARRAY_SIZE(dhgroup_map); i++) { if (!strlen(dhgroup_map[i].name)) continue; if (!strncmp(dhgroup_map[i].name, dhgroup_name, strlen(dhgroup_map[i].name))) return i; } return NVME_AUTH_DHGROUP_INVALID; } EXPORT_SYMBOL_GPL(nvme_auth_dhgroup_id); static struct nvme_dhchap_hash_map { int len; const char hmac[15]; const char digest[8]; } hash_map[] = { [NVME_AUTH_HASH_SHA256] = { .len = 32, .hmac = "hmac(sha256)", .digest = "sha256", }, [NVME_AUTH_HASH_SHA384] = { .len = 48, .hmac = "hmac(sha384)", .digest = "sha384", }, [NVME_AUTH_HASH_SHA512] = { .len = 64, .hmac = "hmac(sha512)", .digest = "sha512", }, }; const char *nvme_auth_hmac_name(u8 hmac_id) { if (hmac_id >= ARRAY_SIZE(hash_map)) return NULL; return hash_map[hmac_id].hmac; } EXPORT_SYMBOL_GPL(nvme_auth_hmac_name); const char *nvme_auth_digest_name(u8 hmac_id) { if (hmac_id >= ARRAY_SIZE(hash_map)) return NULL; return hash_map[hmac_id].digest; } EXPORT_SYMBOL_GPL(nvme_auth_digest_name); u8 nvme_auth_hmac_id(const char *hmac_name) { int i; if (!hmac_name || !strlen(hmac_name)) return NVME_AUTH_HASH_INVALID; for (i = 0; i < ARRAY_SIZE(hash_map); i++) { if (!strlen(hash_map[i].hmac)) continue; if (!strncmp(hash_map[i].hmac, hmac_name, strlen(hash_map[i].hmac))) return i; } return NVME_AUTH_HASH_INVALID; } EXPORT_SYMBOL_GPL(nvme_auth_hmac_id); size_t nvme_auth_hmac_hash_len(u8 hmac_id) { if (hmac_id >= ARRAY_SIZE(hash_map)) return 0; return hash_map[hmac_id].len; } EXPORT_SYMBOL_GPL(nvme_auth_hmac_hash_len); struct nvme_dhchap_key *nvme_auth_extract_key(unsigned char *secret, u8 key_hash) { struct nvme_dhchap_key *key; unsigned char *p; u32 crc; int ret, key_len; size_t allocated_len = strlen(secret); /* Secret might be affixed with a 
':' */ p = strrchr(secret, ':'); if (p) allocated_len = p - secret; key = kzalloc(sizeof(*key), GFP_KERNEL); if (!key) return ERR_PTR(-ENOMEM); key->key = kzalloc(allocated_len, GFP_KERNEL); if (!key->key) { ret = -ENOMEM; goto out_free_key; } key_len = base64_decode(secret, allocated_len, key->key); if (key_len < 0) { pr_debug("base64 key decoding error %d\n", key_len); ret = key_len; goto out_free_secret; } if (key_len != 36 && key_len != 52 && key_len != 68) { pr_err("Invalid key len %d\n", key_len); ret = -EINVAL; goto out_free_secret; } if (key_hash > 0 && (key_len - 4) != nvme_auth_hmac_hash_len(key_hash)) { pr_err("Mismatched key len %d for %s\n", key_len, nvme_auth_hmac_name(key_hash)); ret = -EINVAL; goto out_free_secret; } /* The last four bytes is the CRC in little-endian format */ key_len -= 4; /* * The linux implementation doesn't do pre- and post-increments, * so we have to do it manually. */ crc = ~crc32(~0, key->key, key_len); if (get_unaligned_le32(key->key + key_len) != crc) { pr_err("key crc mismatch (key %08x, crc %08x)\n", get_unaligned_le32(key->key + key_len), crc); ret = -EKEYREJECTED; goto out_free_secret; } key->len = key_len; key->hash = key_hash; return key; out_free_secret: kfree_sensitive(key->key); out_free_key: kfree(key); return ERR_PTR(ret); } EXPORT_SYMBOL_GPL(nvme_auth_extract_key); void nvme_auth_free_key(struct nvme_dhchap_key *key) { if (!key) return; kfree_sensitive(key->key); kfree(key); } EXPORT_SYMBOL_GPL(nvme_auth_free_key); u8 *nvme_auth_transform_key(struct nvme_dhchap_key *key, char *nqn) { const char *hmac_name; struct crypto_shash *key_tfm; struct shash_desc *shash; u8 *transformed_key; int ret; if (!key || !key->key) { pr_warn("No key specified\n"); return ERR_PTR(-ENOKEY); } if (key->hash == 0) { transformed_key = kmemdup(key->key, key->len, GFP_KERNEL); return transformed_key ? 
transformed_key : ERR_PTR(-ENOMEM); } hmac_name = nvme_auth_hmac_name(key->hash); if (!hmac_name) { pr_warn("Invalid key hash id %d\n", key->hash); return ERR_PTR(-EINVAL); } key_tfm = crypto_alloc_shash(hmac_name, 0, 0); if (IS_ERR(key_tfm)) return (u8 *)key_tfm; shash = kmalloc(sizeof(struct shash_desc) + crypto_shash_descsize(key_tfm), GFP_KERNEL); if (!shash) { ret = -ENOMEM; goto out_free_key; } transformed_key = kzalloc(crypto_shash_digestsize(key_tfm), GFP_KERNEL); if (!transformed_key) { ret = -ENOMEM; goto out_free_shash; } shash->tfm = key_tfm; ret = crypto_shash_setkey(key_tfm, key->key, key->len); if (ret < 0) goto out_free_transformed_key; ret = crypto_shash_init(shash); if (ret < 0) goto out_free_transformed_key; ret = crypto_shash_update(shash, nqn, strlen(nqn)); if (ret < 0) goto out_free_transformed_key; ret = crypto_shash_update(shash, "NVMe-over-Fabrics", 17); if (ret < 0) goto out_free_transformed_key; ret = crypto_shash_final(shash, transformed_key); if (ret < 0) goto out_free_transformed_key; kfree(shash); crypto_free_shash(key_tfm); return transformed_key; out_free_transformed_key: kfree_sensitive(transformed_key); out_free_shash: kfree(shash); out_free_key: crypto_free_shash(key_tfm); return ERR_PTR(ret); } EXPORT_SYMBOL_GPL(nvme_auth_transform_key); static int nvme_auth_hash_skey(int hmac_id, u8 *skey, size_t skey_len, u8 *hkey) { const char *digest_name; struct crypto_shash *tfm; int ret; digest_name = nvme_auth_digest_name(hmac_id); if (!digest_name) { pr_debug("%s: failed to get digest for %d\n", __func__, hmac_id); return -EINVAL; } tfm = crypto_alloc_shash(digest_name, 0, 0); if (IS_ERR(tfm)) return -ENOMEM; ret = crypto_shash_tfm_digest(tfm, skey, skey_len, hkey); if (ret < 0) pr_debug("%s: Failed to hash digest len %zu\n", __func__, skey_len); crypto_free_shash(tfm); return ret; } int nvme_auth_augmented_challenge(u8 hmac_id, u8 *skey, size_t skey_len, u8 *challenge, u8 *aug, size_t hlen) { struct crypto_shash *tfm; struct shash_desc *desc; u8 *hashed_key; const char *hmac_name; int ret; hashed_key = kmalloc(hlen, GFP_KERNEL); if (!hashed_key) return -ENOMEM; ret = nvme_auth_hash_skey(hmac_id, skey, skey_len, hashed_key); if (ret < 0) goto out_free_key; hmac_name = nvme_auth_hmac_name(hmac_id); if (!hmac_name) { pr_warn("%s: invalid hash algorithm %d\n", __func__, hmac_id); ret = -EINVAL; goto out_free_key; } tfm = crypto_alloc_shash(hmac_name, 0, 0); if (IS_ERR(tfm)) { ret = PTR_ERR(tfm); goto out_free_key; } desc = kmalloc(sizeof(struct shash_desc) + crypto_shash_descsize(tfm), GFP_KERNEL); if (!desc) { ret = -ENOMEM; goto out_free_hash; } desc->tfm = tfm; ret = crypto_shash_setkey(tfm, hashed_key, hlen); if (ret) goto out_free_desc; ret = crypto_shash_init(desc); if (ret) goto out_free_desc; ret = crypto_shash_update(desc, challenge, hlen); if (ret) goto out_free_desc; ret = crypto_shash_final(desc, aug); out_free_desc: kfree_sensitive(desc); out_free_hash: crypto_free_shash(tfm); out_free_key: kfree_sensitive(hashed_key); return ret; } EXPORT_SYMBOL_GPL(nvme_auth_augmented_challenge); int nvme_auth_gen_privkey(struct crypto_kpp *dh_tfm, u8 dh_gid) { int ret; ret = crypto_kpp_set_secret(dh_tfm, NULL, 0); if (ret) pr_debug("failed to set private key, error %d\n", ret); return ret; } EXPORT_SYMBOL_GPL(nvme_auth_gen_privkey); int nvme_auth_gen_pubkey(struct crypto_kpp *dh_tfm, u8 *host_key, size_t host_key_len) { struct kpp_request *req; struct crypto_wait wait; struct scatterlist dst; int ret; req = kpp_request_alloc(dh_tfm, GFP_KERNEL); if (!req) return 
-ENOMEM; crypto_init_wait(&wait); kpp_request_set_input(req, NULL, 0); sg_init_one(&dst, host_key, host_key_len); kpp_request_set_output(req, &dst, host_key_len); kpp_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG, crypto_req_done, &wait); ret = crypto_wait_req(crypto_kpp_generate_public_key(req), &wait); kpp_request_free(req); return ret; } EXPORT_SYMBOL_GPL(nvme_auth_gen_pubkey); int nvme_auth_gen_shared_secret(struct crypto_kpp *dh_tfm, u8 *ctrl_key, size_t ctrl_key_len, u8 *sess_key, size_t sess_key_len) { struct kpp_request *req; struct crypto_wait wait; struct scatterlist src, dst; int ret; req = kpp_request_alloc(dh_tfm, GFP_KERNEL); if (!req) return -ENOMEM; crypto_init_wait(&wait); sg_init_one(&src, ctrl_key, ctrl_key_len); kpp_request_set_input(req, &src, ctrl_key_len); sg_init_one(&dst, sess_key, sess_key_len); kpp_request_set_output(req, &dst, sess_key_len); kpp_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG, crypto_req_done, &wait); ret = crypto_wait_req(crypto_kpp_compute_shared_secret(req), &wait); kpp_request_free(req); return ret; } EXPORT_SYMBOL_GPL(nvme_auth_gen_shared_secret); int nvme_auth_generate_key(u8 *secret, struct nvme_dhchap_key **ret_key) { struct nvme_dhchap_key *key; u8 key_hash; if (!secret) { *ret_key = NULL; return 0; } if (sscanf(secret, "DHHC-1:%hhd:%*s:", &key_hash) != 1) return -EINVAL; /* Pass in the secret without the 'DHHC-1:XX:' prefix */ key = nvme_auth_extract_key(secret + 10, key_hash); if (IS_ERR(key)) { *ret_key = NULL; return PTR_ERR(key); } *ret_key = key; return 0; } EXPORT_SYMBOL_GPL(nvme_auth_generate_key); MODULE_LICENSE("GPL v2");
linux-master
drivers/nvme/common/auth.c
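The extract-key path above rejects any decoded DH-HMAC-CHAP secret whose trailing 4 bytes do not match a CRC-32 of the preceding key bytes. The following standalone userspace sketch is not taken from the sources; it assumes zlib's crc32(), whose pre/post-inversion convention should match the kernel's ~crc32(~0, ...) usage, and a hypothetical already-base64-decoded buffer.

/*
 * Illustrative userspace sketch (not part of the kernel file above):
 * validates the 4-byte little-endian CRC trailer that
 * nvme_auth_extract_key() expects on a decoded DH-HMAC-CHAP secret.
 * Build with: cc crc_check.c -lz
 */
#include <stdint.h>
#include <stddef.h>
#include <stdio.h>
#include <zlib.h>

/* little-endian load, analogous to the kernel's get_unaligned_le32() */
static uint32_t load_le32(const uint8_t *p)
{
	return (uint32_t)p[0] | ((uint32_t)p[1] << 8) |
	       ((uint32_t)p[2] << 16) | ((uint32_t)p[3] << 24);
}

/*
 * key/key_len describe the base64-decoded secret, which must be 36, 52
 * or 68 bytes long; the final 4 bytes hold the CRC-32 of the preceding
 * bytes in little-endian byte order.
 */
static int dhchap_key_crc_ok(const uint8_t *key, size_t key_len)
{
	uint32_t want, got;

	if (key_len != 36 && key_len != 52 && key_len != 68)
		return 0;

	want = load_le32(key + key_len - 4);
	got = (uint32_t)crc32(0L, key, key_len - 4);

	return want == got;
}

int main(void)
{
	/* hypothetical 36-byte buffer: 32 key bytes followed by the trailer */
	uint8_t buf[36] = { 0 };
	uint32_t crc = (uint32_t)crc32(0L, buf, 32);

	buf[32] = crc & 0xff;
	buf[33] = (crc >> 8) & 0xff;
	buf[34] = (crc >> 16) & 0xff;
	buf[35] = (crc >> 24) & 0xff;

	printf("crc ok: %d\n", dhchap_key_crc_ok(buf, sizeof(buf)));
	return 0;
}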
// SPDX-License-Identifier: GPL-2.0 /* Bareudp: UDP tunnel encasulation for different Payload types like * MPLS, NSH, IP, etc. * Copyright (c) 2019 Nokia, Inc. * Authors: Martin Varghese, <[email protected]> */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/kernel.h> #include <linux/module.h> #include <linux/etherdevice.h> #include <linux/hash.h> #include <net/dst_metadata.h> #include <net/gro_cells.h> #include <net/rtnetlink.h> #include <net/protocol.h> #include <net/ip6_tunnel.h> #include <net/ip_tunnels.h> #include <net/udp_tunnel.h> #include <net/bareudp.h> #define BAREUDP_BASE_HLEN sizeof(struct udphdr) #define BAREUDP_IPV4_HLEN (sizeof(struct iphdr) + \ sizeof(struct udphdr)) #define BAREUDP_IPV6_HLEN (sizeof(struct ipv6hdr) + \ sizeof(struct udphdr)) static bool log_ecn_error = true; module_param(log_ecn_error, bool, 0644); MODULE_PARM_DESC(log_ecn_error, "Log packets received with corrupted ECN"); /* per-network namespace private data for this module */ static unsigned int bareudp_net_id; struct bareudp_net { struct list_head bareudp_list; }; struct bareudp_conf { __be16 ethertype; __be16 port; u16 sport_min; bool multi_proto_mode; }; /* Pseudo network device */ struct bareudp_dev { struct net *net; /* netns for packet i/o */ struct net_device *dev; /* netdev for bareudp tunnel */ __be16 ethertype; __be16 port; u16 sport_min; bool multi_proto_mode; struct socket __rcu *sock; struct list_head next; /* bareudp node on namespace list */ struct gro_cells gro_cells; }; static int bareudp_udp_encap_recv(struct sock *sk, struct sk_buff *skb) { struct metadata_dst *tun_dst = NULL; struct bareudp_dev *bareudp; unsigned short family; unsigned int len; __be16 proto; void *oiph; int err; bareudp = rcu_dereference_sk_user_data(sk); if (!bareudp) goto drop; if (skb->protocol == htons(ETH_P_IP)) family = AF_INET; else family = AF_INET6; if (bareudp->ethertype == htons(ETH_P_IP)) { __u8 ipversion; if (skb_copy_bits(skb, BAREUDP_BASE_HLEN, &ipversion, sizeof(ipversion))) { bareudp->dev->stats.rx_dropped++; goto drop; } ipversion >>= 4; if (ipversion == 4) { proto = htons(ETH_P_IP); } else if (ipversion == 6 && bareudp->multi_proto_mode) { proto = htons(ETH_P_IPV6); } else { bareudp->dev->stats.rx_dropped++; goto drop; } } else if (bareudp->ethertype == htons(ETH_P_MPLS_UC)) { struct iphdr *tunnel_hdr; tunnel_hdr = (struct iphdr *)skb_network_header(skb); if (tunnel_hdr->version == 4) { if (!ipv4_is_multicast(tunnel_hdr->daddr)) { proto = bareudp->ethertype; } else if (bareudp->multi_proto_mode && ipv4_is_multicast(tunnel_hdr->daddr)) { proto = htons(ETH_P_MPLS_MC); } else { bareudp->dev->stats.rx_dropped++; goto drop; } } else { int addr_type; struct ipv6hdr *tunnel_hdr_v6; tunnel_hdr_v6 = (struct ipv6hdr *)skb_network_header(skb); addr_type = ipv6_addr_type((struct in6_addr *)&tunnel_hdr_v6->daddr); if (!(addr_type & IPV6_ADDR_MULTICAST)) { proto = bareudp->ethertype; } else if (bareudp->multi_proto_mode && (addr_type & IPV6_ADDR_MULTICAST)) { proto = htons(ETH_P_MPLS_MC); } else { bareudp->dev->stats.rx_dropped++; goto drop; } } } else { proto = bareudp->ethertype; } if (iptunnel_pull_header(skb, BAREUDP_BASE_HLEN, proto, !net_eq(bareudp->net, dev_net(bareudp->dev)))) { bareudp->dev->stats.rx_dropped++; goto drop; } tun_dst = udp_tun_rx_dst(skb, family, TUNNEL_KEY, 0, 0); if (!tun_dst) { bareudp->dev->stats.rx_dropped++; goto drop; } skb_dst_set(skb, &tun_dst->dst); skb->dev = bareudp->dev; oiph = skb_network_header(skb); skb_reset_network_header(skb); skb_reset_mac_header(skb); if 
(!ipv6_mod_enabled() || family == AF_INET) err = IP_ECN_decapsulate(oiph, skb); else err = IP6_ECN_decapsulate(oiph, skb); if (unlikely(err)) { if (log_ecn_error) { if (!ipv6_mod_enabled() || family == AF_INET) net_info_ratelimited("non-ECT from %pI4 " "with TOS=%#x\n", &((struct iphdr *)oiph)->saddr, ((struct iphdr *)oiph)->tos); else net_info_ratelimited("non-ECT from %pI6\n", &((struct ipv6hdr *)oiph)->saddr); } if (err > 1) { ++bareudp->dev->stats.rx_frame_errors; ++bareudp->dev->stats.rx_errors; goto drop; } } len = skb->len; err = gro_cells_receive(&bareudp->gro_cells, skb); if (likely(err == NET_RX_SUCCESS)) dev_sw_netstats_rx_add(bareudp->dev, len); return 0; drop: /* Consume bad packet */ kfree_skb(skb); return 0; } static int bareudp_err_lookup(struct sock *sk, struct sk_buff *skb) { return 0; } static int bareudp_init(struct net_device *dev) { struct bareudp_dev *bareudp = netdev_priv(dev); int err; dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats); if (!dev->tstats) return -ENOMEM; err = gro_cells_init(&bareudp->gro_cells, dev); if (err) { free_percpu(dev->tstats); return err; } return 0; } static void bareudp_uninit(struct net_device *dev) { struct bareudp_dev *bareudp = netdev_priv(dev); gro_cells_destroy(&bareudp->gro_cells); free_percpu(dev->tstats); } static struct socket *bareudp_create_sock(struct net *net, __be16 port) { struct udp_port_cfg udp_conf; struct socket *sock; int err; memset(&udp_conf, 0, sizeof(udp_conf)); if (ipv6_mod_enabled()) udp_conf.family = AF_INET6; else udp_conf.family = AF_INET; udp_conf.local_udp_port = port; /* Open UDP socket */ err = udp_sock_create(net, &udp_conf, &sock); if (err < 0) return ERR_PTR(err); udp_allow_gso(sock->sk); return sock; } /* Create new listen socket if needed */ static int bareudp_socket_create(struct bareudp_dev *bareudp, __be16 port) { struct udp_tunnel_sock_cfg tunnel_cfg; struct socket *sock; sock = bareudp_create_sock(bareudp->net, port); if (IS_ERR(sock)) return PTR_ERR(sock); /* Mark socket as an encapsulation socket */ memset(&tunnel_cfg, 0, sizeof(tunnel_cfg)); tunnel_cfg.sk_user_data = bareudp; tunnel_cfg.encap_type = 1; tunnel_cfg.encap_rcv = bareudp_udp_encap_recv; tunnel_cfg.encap_err_lookup = bareudp_err_lookup; tunnel_cfg.encap_destroy = NULL; setup_udp_tunnel_sock(bareudp->net, sock, &tunnel_cfg); rcu_assign_pointer(bareudp->sock, sock); return 0; } static int bareudp_open(struct net_device *dev) { struct bareudp_dev *bareudp = netdev_priv(dev); int ret = 0; ret = bareudp_socket_create(bareudp, bareudp->port); return ret; } static void bareudp_sock_release(struct bareudp_dev *bareudp) { struct socket *sock; sock = bareudp->sock; rcu_assign_pointer(bareudp->sock, NULL); synchronize_net(); udp_tunnel_sock_release(sock); } static int bareudp_stop(struct net_device *dev) { struct bareudp_dev *bareudp = netdev_priv(dev); bareudp_sock_release(bareudp); return 0; } static int bareudp_xmit_skb(struct sk_buff *skb, struct net_device *dev, struct bareudp_dev *bareudp, const struct ip_tunnel_info *info) { bool xnet = !net_eq(bareudp->net, dev_net(bareudp->dev)); bool use_cache = ip_tunnel_dst_cache_usable(skb, info); struct socket *sock = rcu_dereference(bareudp->sock); bool udp_sum = !!(info->key.tun_flags & TUNNEL_CSUM); const struct ip_tunnel_key *key = &info->key; struct rtable *rt; __be16 sport, df; int min_headroom; __u8 tos, ttl; __be32 saddr; int err; if (!sock) return -ESHUTDOWN; rt = ip_route_output_tunnel(skb, dev, bareudp->net, &saddr, info, IPPROTO_UDP, use_cache); if (IS_ERR(rt)) return 
PTR_ERR(rt); skb_tunnel_check_pmtu(skb, &rt->dst, BAREUDP_IPV4_HLEN + info->options_len, false); sport = udp_flow_src_port(bareudp->net, skb, bareudp->sport_min, USHRT_MAX, true); tos = ip_tunnel_ecn_encap(key->tos, ip_hdr(skb), skb); ttl = key->ttl; df = key->tun_flags & TUNNEL_DONT_FRAGMENT ? htons(IP_DF) : 0; skb_scrub_packet(skb, xnet); err = -ENOSPC; if (!skb_pull(skb, skb_network_offset(skb))) goto free_dst; min_headroom = LL_RESERVED_SPACE(rt->dst.dev) + rt->dst.header_len + BAREUDP_BASE_HLEN + info->options_len + sizeof(struct iphdr); err = skb_cow_head(skb, min_headroom); if (unlikely(err)) goto free_dst; err = udp_tunnel_handle_offloads(skb, udp_sum); if (err) goto free_dst; skb_set_inner_protocol(skb, bareudp->ethertype); udp_tunnel_xmit_skb(rt, sock->sk, skb, saddr, info->key.u.ipv4.dst, tos, ttl, df, sport, bareudp->port, !net_eq(bareudp->net, dev_net(bareudp->dev)), !(info->key.tun_flags & TUNNEL_CSUM)); return 0; free_dst: dst_release(&rt->dst); return err; } static int bareudp6_xmit_skb(struct sk_buff *skb, struct net_device *dev, struct bareudp_dev *bareudp, const struct ip_tunnel_info *info) { bool xnet = !net_eq(bareudp->net, dev_net(bareudp->dev)); bool use_cache = ip_tunnel_dst_cache_usable(skb, info); struct socket *sock = rcu_dereference(bareudp->sock); bool udp_sum = !!(info->key.tun_flags & TUNNEL_CSUM); const struct ip_tunnel_key *key = &info->key; struct dst_entry *dst = NULL; struct in6_addr saddr, daddr; int min_headroom; __u8 prio, ttl; __be16 sport; int err; if (!sock) return -ESHUTDOWN; dst = ip6_dst_lookup_tunnel(skb, dev, bareudp->net, sock, &saddr, info, IPPROTO_UDP, use_cache); if (IS_ERR(dst)) return PTR_ERR(dst); skb_tunnel_check_pmtu(skb, dst, BAREUDP_IPV6_HLEN + info->options_len, false); sport = udp_flow_src_port(bareudp->net, skb, bareudp->sport_min, USHRT_MAX, true); prio = ip_tunnel_ecn_encap(key->tos, ip_hdr(skb), skb); ttl = key->ttl; skb_scrub_packet(skb, xnet); err = -ENOSPC; if (!skb_pull(skb, skb_network_offset(skb))) goto free_dst; min_headroom = LL_RESERVED_SPACE(dst->dev) + dst->header_len + BAREUDP_BASE_HLEN + info->options_len + sizeof(struct ipv6hdr); err = skb_cow_head(skb, min_headroom); if (unlikely(err)) goto free_dst; err = udp_tunnel_handle_offloads(skb, udp_sum); if (err) goto free_dst; daddr = info->key.u.ipv6.dst; udp_tunnel6_xmit_skb(dst, sock->sk, skb, dev, &saddr, &daddr, prio, ttl, info->key.label, sport, bareudp->port, !(info->key.tun_flags & TUNNEL_CSUM)); return 0; free_dst: dst_release(dst); return err; } static bool bareudp_proto_valid(struct bareudp_dev *bareudp, __be16 proto) { if (bareudp->ethertype == proto) return true; if (!bareudp->multi_proto_mode) return false; if (bareudp->ethertype == htons(ETH_P_MPLS_UC) && proto == htons(ETH_P_MPLS_MC)) return true; if (bareudp->ethertype == htons(ETH_P_IP) && proto == htons(ETH_P_IPV6)) return true; return false; } static netdev_tx_t bareudp_xmit(struct sk_buff *skb, struct net_device *dev) { struct bareudp_dev *bareudp = netdev_priv(dev); struct ip_tunnel_info *info = NULL; int err; if (!bareudp_proto_valid(bareudp, skb->protocol)) { err = -EINVAL; goto tx_error; } info = skb_tunnel_info(skb); if (unlikely(!info || !(info->mode & IP_TUNNEL_INFO_TX))) { err = -EINVAL; goto tx_error; } rcu_read_lock(); if (ipv6_mod_enabled() && info->mode & IP_TUNNEL_INFO_IPV6) err = bareudp6_xmit_skb(skb, dev, bareudp, info); else err = bareudp_xmit_skb(skb, dev, bareudp, info); rcu_read_unlock(); if (likely(!err)) return NETDEV_TX_OK; tx_error: dev_kfree_skb(skb); if (err == -ELOOP) 
dev->stats.collisions++; else if (err == -ENETUNREACH) dev->stats.tx_carrier_errors++; dev->stats.tx_errors++; return NETDEV_TX_OK; } static int bareudp_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb) { struct ip_tunnel_info *info = skb_tunnel_info(skb); struct bareudp_dev *bareudp = netdev_priv(dev); bool use_cache; use_cache = ip_tunnel_dst_cache_usable(skb, info); if (!ipv6_mod_enabled() || ip_tunnel_info_af(info) == AF_INET) { struct rtable *rt; __be32 saddr; rt = ip_route_output_tunnel(skb, dev, bareudp->net, &saddr, info, IPPROTO_UDP, use_cache); if (IS_ERR(rt)) return PTR_ERR(rt); ip_rt_put(rt); info->key.u.ipv4.src = saddr; } else if (ip_tunnel_info_af(info) == AF_INET6) { struct dst_entry *dst; struct in6_addr saddr; struct socket *sock = rcu_dereference(bareudp->sock); dst = ip6_dst_lookup_tunnel(skb, dev, bareudp->net, sock, &saddr, info, IPPROTO_UDP, use_cache); if (IS_ERR(dst)) return PTR_ERR(dst); dst_release(dst); info->key.u.ipv6.src = saddr; } else { return -EINVAL; } info->key.tp_src = udp_flow_src_port(bareudp->net, skb, bareudp->sport_min, USHRT_MAX, true); info->key.tp_dst = bareudp->port; return 0; } static const struct net_device_ops bareudp_netdev_ops = { .ndo_init = bareudp_init, .ndo_uninit = bareudp_uninit, .ndo_open = bareudp_open, .ndo_stop = bareudp_stop, .ndo_start_xmit = bareudp_xmit, .ndo_get_stats64 = dev_get_tstats64, .ndo_fill_metadata_dst = bareudp_fill_metadata_dst, }; static const struct nla_policy bareudp_policy[IFLA_BAREUDP_MAX + 1] = { [IFLA_BAREUDP_PORT] = { .type = NLA_U16 }, [IFLA_BAREUDP_ETHERTYPE] = { .type = NLA_U16 }, [IFLA_BAREUDP_SRCPORT_MIN] = { .type = NLA_U16 }, [IFLA_BAREUDP_MULTIPROTO_MODE] = { .type = NLA_FLAG }, }; /* Info for udev, that this is a virtual tunnel endpoint */ static const struct device_type bareudp_type = { .name = "bareudp", }; /* Initialize the device structure. 
*/ static void bareudp_setup(struct net_device *dev) { dev->netdev_ops = &bareudp_netdev_ops; dev->needs_free_netdev = true; SET_NETDEV_DEVTYPE(dev, &bareudp_type); dev->features |= NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_FRAGLIST; dev->features |= NETIF_F_RXCSUM; dev->features |= NETIF_F_LLTX; dev->features |= NETIF_F_GSO_SOFTWARE; dev->hw_features |= NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_FRAGLIST; dev->hw_features |= NETIF_F_RXCSUM; dev->hw_features |= NETIF_F_GSO_SOFTWARE; dev->hard_header_len = 0; dev->addr_len = 0; dev->mtu = ETH_DATA_LEN; dev->min_mtu = IPV4_MIN_MTU; dev->max_mtu = IP_MAX_MTU - BAREUDP_BASE_HLEN; dev->type = ARPHRD_NONE; netif_keep_dst(dev); dev->priv_flags |= IFF_NO_QUEUE; dev->flags = IFF_POINTOPOINT | IFF_NOARP | IFF_MULTICAST; } static int bareudp_validate(struct nlattr *tb[], struct nlattr *data[], struct netlink_ext_ack *extack) { if (!data) { NL_SET_ERR_MSG(extack, "Not enough attributes provided to perform the operation"); return -EINVAL; } return 0; } static int bareudp2info(struct nlattr *data[], struct bareudp_conf *conf, struct netlink_ext_ack *extack) { memset(conf, 0, sizeof(*conf)); if (!data[IFLA_BAREUDP_PORT]) { NL_SET_ERR_MSG(extack, "port not specified"); return -EINVAL; } if (!data[IFLA_BAREUDP_ETHERTYPE]) { NL_SET_ERR_MSG(extack, "ethertype not specified"); return -EINVAL; } conf->port = nla_get_u16(data[IFLA_BAREUDP_PORT]); conf->ethertype = nla_get_u16(data[IFLA_BAREUDP_ETHERTYPE]); if (data[IFLA_BAREUDP_SRCPORT_MIN]) conf->sport_min = nla_get_u16(data[IFLA_BAREUDP_SRCPORT_MIN]); if (data[IFLA_BAREUDP_MULTIPROTO_MODE]) conf->multi_proto_mode = true; return 0; } static struct bareudp_dev *bareudp_find_dev(struct bareudp_net *bn, const struct bareudp_conf *conf) { struct bareudp_dev *bareudp, *t = NULL; list_for_each_entry(bareudp, &bn->bareudp_list, next) { if (conf->port == bareudp->port) t = bareudp; } return t; } static int bareudp_configure(struct net *net, struct net_device *dev, struct bareudp_conf *conf, struct netlink_ext_ack *extack) { struct bareudp_net *bn = net_generic(net, bareudp_net_id); struct bareudp_dev *t, *bareudp = netdev_priv(dev); int err; bareudp->net = net; bareudp->dev = dev; t = bareudp_find_dev(bn, conf); if (t) { NL_SET_ERR_MSG(extack, "Another bareudp device using the same port already exists"); return -EBUSY; } if (conf->multi_proto_mode && (conf->ethertype != htons(ETH_P_MPLS_UC) && conf->ethertype != htons(ETH_P_IP))) { NL_SET_ERR_MSG(extack, "Cannot set multiproto mode for this ethertype (only IPv4 and unicast MPLS are supported)"); return -EINVAL; } bareudp->port = conf->port; bareudp->ethertype = conf->ethertype; bareudp->sport_min = conf->sport_min; bareudp->multi_proto_mode = conf->multi_proto_mode; err = register_netdevice(dev); if (err) return err; list_add(&bareudp->next, &bn->bareudp_list); return 0; } static int bareudp_link_config(struct net_device *dev, struct nlattr *tb[]) { int err; if (tb[IFLA_MTU]) { err = dev_set_mtu(dev, nla_get_u32(tb[IFLA_MTU])); if (err) return err; } return 0; } static void bareudp_dellink(struct net_device *dev, struct list_head *head) { struct bareudp_dev *bareudp = netdev_priv(dev); list_del(&bareudp->next); unregister_netdevice_queue(dev, head); } static int bareudp_newlink(struct net *net, struct net_device *dev, struct nlattr *tb[], struct nlattr *data[], struct netlink_ext_ack *extack) { struct bareudp_conf conf; int err; err = bareudp2info(data, &conf, extack); if (err) return err; err = bareudp_configure(net, dev, &conf, extack); if (err) return err; err = 
bareudp_link_config(dev, tb); if (err) goto err_unconfig; return 0; err_unconfig: bareudp_dellink(dev, NULL); return err; } static size_t bareudp_get_size(const struct net_device *dev) { return nla_total_size(sizeof(__be16)) + /* IFLA_BAREUDP_PORT */ nla_total_size(sizeof(__be16)) + /* IFLA_BAREUDP_ETHERTYPE */ nla_total_size(sizeof(__u16)) + /* IFLA_BAREUDP_SRCPORT_MIN */ nla_total_size(0) + /* IFLA_BAREUDP_MULTIPROTO_MODE */ 0; } static int bareudp_fill_info(struct sk_buff *skb, const struct net_device *dev) { struct bareudp_dev *bareudp = netdev_priv(dev); if (nla_put_be16(skb, IFLA_BAREUDP_PORT, bareudp->port)) goto nla_put_failure; if (nla_put_be16(skb, IFLA_BAREUDP_ETHERTYPE, bareudp->ethertype)) goto nla_put_failure; if (nla_put_u16(skb, IFLA_BAREUDP_SRCPORT_MIN, bareudp->sport_min)) goto nla_put_failure; if (bareudp->multi_proto_mode && nla_put_flag(skb, IFLA_BAREUDP_MULTIPROTO_MODE)) goto nla_put_failure; return 0; nla_put_failure: return -EMSGSIZE; } static struct rtnl_link_ops bareudp_link_ops __read_mostly = { .kind = "bareudp", .maxtype = IFLA_BAREUDP_MAX, .policy = bareudp_policy, .priv_size = sizeof(struct bareudp_dev), .setup = bareudp_setup, .validate = bareudp_validate, .newlink = bareudp_newlink, .dellink = bareudp_dellink, .get_size = bareudp_get_size, .fill_info = bareudp_fill_info, }; static __net_init int bareudp_init_net(struct net *net) { struct bareudp_net *bn = net_generic(net, bareudp_net_id); INIT_LIST_HEAD(&bn->bareudp_list); return 0; } static void bareudp_destroy_tunnels(struct net *net, struct list_head *head) { struct bareudp_net *bn = net_generic(net, bareudp_net_id); struct bareudp_dev *bareudp, *next; list_for_each_entry_safe(bareudp, next, &bn->bareudp_list, next) unregister_netdevice_queue(bareudp->dev, head); } static void __net_exit bareudp_exit_batch_net(struct list_head *net_list) { struct net *net; LIST_HEAD(list); rtnl_lock(); list_for_each_entry(net, net_list, exit_list) bareudp_destroy_tunnels(net, &list); /* unregister the devices gathered above */ unregister_netdevice_many(&list); rtnl_unlock(); } static struct pernet_operations bareudp_net_ops = { .init = bareudp_init_net, .exit_batch = bareudp_exit_batch_net, .id = &bareudp_net_id, .size = sizeof(struct bareudp_net), }; static int __init bareudp_init_module(void) { int rc; rc = register_pernet_subsys(&bareudp_net_ops); if (rc) goto out1; rc = rtnl_link_register(&bareudp_link_ops); if (rc) goto out2; return 0; out2: unregister_pernet_subsys(&bareudp_net_ops); out1: return rc; } late_initcall(bareudp_init_module); static void __exit bareudp_cleanup_module(void) { rtnl_link_unregister(&bareudp_link_ops); unregister_pernet_subsys(&bareudp_net_ops); } module_exit(bareudp_cleanup_module); MODULE_ALIAS_RTNL_LINK("bareudp"); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Martin Varghese <[email protected]>"); MODULE_DESCRIPTION("Interface driver for UDP encapsulated traffic");
linux-master
drivers/net/bareudp.c
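The transmit path above only accepts frames whose ethertype the tunnel was configured for, with multi-proto mode additionally admitting MPLS multicast on an MPLS-unicast device and IPv6 on an IPv4 device. The standalone sketch below is not part of the driver; it mirrors the decision table of bareudp_proto_valid() with an invented bareudp_cfg struct so the rules can be exercised in isolation.

/*
 * Illustrative sketch (not from the driver above): the same ethertype
 * acceptance rules as bareudp_proto_valid(), exercised from main().
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <arpa/inet.h>
#include <linux/if_ether.h>

struct bareudp_cfg {
	uint16_t ethertype;	/* network byte order, as in the driver */
	bool multi_proto_mode;
};

static bool proto_valid(const struct bareudp_cfg *cfg, uint16_t proto)
{
	if (cfg->ethertype == proto)
		return true;
	if (!cfg->multi_proto_mode)
		return false;
	if (cfg->ethertype == htons(ETH_P_MPLS_UC) &&
	    proto == htons(ETH_P_MPLS_MC))
		return true;
	if (cfg->ethertype == htons(ETH_P_IP) &&
	    proto == htons(ETH_P_IPV6))
		return true;
	return false;
}

int main(void)
{
	struct bareudp_cfg mpls = { htons(ETH_P_MPLS_UC), true };
	struct bareudp_cfg ip = { htons(ETH_P_IP), false };

	printf("mpls dev, mpls-mc frame:            %d\n",
	       proto_valid(&mpls, htons(ETH_P_MPLS_MC)));	/* expect 1 */
	printf("ip dev (no multiproto), ipv6 frame: %d\n",
	       proto_valid(&ip, htons(ETH_P_IPV6)));		/* expect 0 */
	return 0;
}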
// SPDX-License-Identifier: GPL-2.0-or-later /* * vrf.c: device driver to encapsulate a VRF space * * Copyright (c) 2015 Cumulus Networks. All rights reserved. * Copyright (c) 2015 Shrijeet Mukherjee <[email protected]> * Copyright (c) 2015 David Ahern <[email protected]> * * Based on dummy, team and ipvlan drivers */ #include <linux/ethtool.h> #include <linux/module.h> #include <linux/kernel.h> #include <linux/netdevice.h> #include <linux/etherdevice.h> #include <linux/ip.h> #include <linux/init.h> #include <linux/moduleparam.h> #include <linux/netfilter.h> #include <linux/rtnetlink.h> #include <net/rtnetlink.h> #include <linux/u64_stats_sync.h> #include <linux/hashtable.h> #include <linux/spinlock_types.h> #include <linux/inetdevice.h> #include <net/arp.h> #include <net/ip.h> #include <net/ip_fib.h> #include <net/ip6_fib.h> #include <net/ip6_route.h> #include <net/route.h> #include <net/addrconf.h> #include <net/l3mdev.h> #include <net/fib_rules.h> #include <net/sch_generic.h> #include <net/netns/generic.h> #include <net/netfilter/nf_conntrack.h> #define DRV_NAME "vrf" #define DRV_VERSION "1.1" #define FIB_RULE_PREF 1000 /* default preference for FIB rules */ #define HT_MAP_BITS 4 #define HASH_INITVAL ((u32)0xcafef00d) struct vrf_map { DECLARE_HASHTABLE(ht, HT_MAP_BITS); spinlock_t vmap_lock; /* shared_tables: * count how many distinct tables do not comply with the strict mode * requirement. * shared_tables value must be 0 in order to enable the strict mode. * * example of the evolution of shared_tables: * | time * add vrf0 --> table 100 shared_tables = 0 | t0 * add vrf1 --> table 101 shared_tables = 0 | t1 * add vrf2 --> table 100 shared_tables = 1 | t2 * add vrf3 --> table 100 shared_tables = 1 | t3 * add vrf4 --> table 101 shared_tables = 2 v t4 * * shared_tables is a "step function" (or "staircase function") * and it is increased by one when the second vrf is associated to a * table. * * at t2, vrf0 and vrf2 are bound to table 100: shared_tables = 1. * * at t3, another dev (vrf3) is bound to the same table 100 but the * value of shared_tables is still 1. * This means that no matter how many new vrfs will register on the * table 100, the shared_tables will not increase (considering only * table 100). * * at t4, vrf4 is bound to table 101, and shared_tables = 2. * * Looking at the value of shared_tables we can immediately know if * the strict_mode can or cannot be enforced. Indeed, strict_mode * can be enforced iff shared_tables = 0. * * Conversely, shared_tables is decreased when a vrf is de-associated * from a table with exactly two associated vrfs. 
*/ u32 shared_tables; bool strict_mode; }; struct vrf_map_elem { struct hlist_node hnode; struct list_head vrf_list; /* VRFs registered to this table */ u32 table_id; int users; int ifindex; }; static unsigned int vrf_net_id; /* per netns vrf data */ struct netns_vrf { /* protected by rtnl lock */ bool add_fib_rules; struct vrf_map vmap; struct ctl_table_header *ctl_hdr; }; struct net_vrf { struct rtable __rcu *rth; struct rt6_info __rcu *rt6; #if IS_ENABLED(CONFIG_IPV6) struct fib6_table *fib6_table; #endif u32 tb_id; struct list_head me_list; /* entry in vrf_map_elem */ int ifindex; }; struct pcpu_dstats { u64 tx_pkts; u64 tx_bytes; u64 tx_drps; u64 rx_pkts; u64 rx_bytes; u64 rx_drps; struct u64_stats_sync syncp; }; static void vrf_rx_stats(struct net_device *dev, int len) { struct pcpu_dstats *dstats = this_cpu_ptr(dev->dstats); u64_stats_update_begin(&dstats->syncp); dstats->rx_pkts++; dstats->rx_bytes += len; u64_stats_update_end(&dstats->syncp); } static void vrf_tx_error(struct net_device *vrf_dev, struct sk_buff *skb) { vrf_dev->stats.tx_errors++; kfree_skb(skb); } static void vrf_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats) { int i; for_each_possible_cpu(i) { const struct pcpu_dstats *dstats; u64 tbytes, tpkts, tdrops, rbytes, rpkts; unsigned int start; dstats = per_cpu_ptr(dev->dstats, i); do { start = u64_stats_fetch_begin(&dstats->syncp); tbytes = dstats->tx_bytes; tpkts = dstats->tx_pkts; tdrops = dstats->tx_drps; rbytes = dstats->rx_bytes; rpkts = dstats->rx_pkts; } while (u64_stats_fetch_retry(&dstats->syncp, start)); stats->tx_bytes += tbytes; stats->tx_packets += tpkts; stats->tx_dropped += tdrops; stats->rx_bytes += rbytes; stats->rx_packets += rpkts; } } static struct vrf_map *netns_vrf_map(struct net *net) { struct netns_vrf *nn_vrf = net_generic(net, vrf_net_id); return &nn_vrf->vmap; } static struct vrf_map *netns_vrf_map_by_dev(struct net_device *dev) { return netns_vrf_map(dev_net(dev)); } static int vrf_map_elem_get_vrf_ifindex(struct vrf_map_elem *me) { struct list_head *me_head = &me->vrf_list; struct net_vrf *vrf; if (list_empty(me_head)) return -ENODEV; vrf = list_first_entry(me_head, struct net_vrf, me_list); return vrf->ifindex; } static struct vrf_map_elem *vrf_map_elem_alloc(gfp_t flags) { struct vrf_map_elem *me; me = kmalloc(sizeof(*me), flags); if (!me) return NULL; return me; } static void vrf_map_elem_free(struct vrf_map_elem *me) { kfree(me); } static void vrf_map_elem_init(struct vrf_map_elem *me, int table_id, int ifindex, int users) { me->table_id = table_id; me->ifindex = ifindex; me->users = users; INIT_LIST_HEAD(&me->vrf_list); } static struct vrf_map_elem *vrf_map_lookup_elem(struct vrf_map *vmap, u32 table_id) { struct vrf_map_elem *me; u32 key; key = jhash_1word(table_id, HASH_INITVAL); hash_for_each_possible(vmap->ht, me, hnode, key) { if (me->table_id == table_id) return me; } return NULL; } static void vrf_map_add_elem(struct vrf_map *vmap, struct vrf_map_elem *me) { u32 table_id = me->table_id; u32 key; key = jhash_1word(table_id, HASH_INITVAL); hash_add(vmap->ht, &me->hnode, key); } static void vrf_map_del_elem(struct vrf_map_elem *me) { hash_del(&me->hnode); } static void vrf_map_lock(struct vrf_map *vmap) __acquires(&vmap->vmap_lock) { spin_lock(&vmap->vmap_lock); } static void vrf_map_unlock(struct vrf_map *vmap) __releases(&vmap->vmap_lock) { spin_unlock(&vmap->vmap_lock); } /* called with rtnl lock held */ static int vrf_map_register_dev(struct net_device *dev, struct netlink_ext_ack *extack) { struct vrf_map 
*vmap = netns_vrf_map_by_dev(dev); struct net_vrf *vrf = netdev_priv(dev); struct vrf_map_elem *new_me, *me; u32 table_id = vrf->tb_id; bool free_new_me = false; int users; int res; /* we pre-allocate elements used in the spin-locked section (so that we * keep the spinlock as short as possible). */ new_me = vrf_map_elem_alloc(GFP_KERNEL); if (!new_me) return -ENOMEM; vrf_map_elem_init(new_me, table_id, dev->ifindex, 0); vrf_map_lock(vmap); me = vrf_map_lookup_elem(vmap, table_id); if (!me) { me = new_me; vrf_map_add_elem(vmap, me); goto link_vrf; } /* we already have an entry in the vrf_map, so it means there is (at * least) a vrf registered on the specific table. */ free_new_me = true; if (vmap->strict_mode) { /* vrfs cannot share the same table */ NL_SET_ERR_MSG(extack, "Table is used by another VRF"); res = -EBUSY; goto unlock; } link_vrf: users = ++me->users; if (users == 2) ++vmap->shared_tables; list_add(&vrf->me_list, &me->vrf_list); res = 0; unlock: vrf_map_unlock(vmap); /* clean-up, if needed */ if (free_new_me) vrf_map_elem_free(new_me); return res; } /* called with rtnl lock held */ static void vrf_map_unregister_dev(struct net_device *dev) { struct vrf_map *vmap = netns_vrf_map_by_dev(dev); struct net_vrf *vrf = netdev_priv(dev); u32 table_id = vrf->tb_id; struct vrf_map_elem *me; int users; vrf_map_lock(vmap); me = vrf_map_lookup_elem(vmap, table_id); if (!me) goto unlock; list_del(&vrf->me_list); users = --me->users; if (users == 1) { --vmap->shared_tables; } else if (users == 0) { vrf_map_del_elem(me); /* no one will refer to this element anymore */ vrf_map_elem_free(me); } unlock: vrf_map_unlock(vmap); } /* return the vrf device index associated with the table_id */ static int vrf_ifindex_lookup_by_table_id(struct net *net, u32 table_id) { struct vrf_map *vmap = netns_vrf_map(net); struct vrf_map_elem *me; int ifindex; vrf_map_lock(vmap); if (!vmap->strict_mode) { ifindex = -EPERM; goto unlock; } me = vrf_map_lookup_elem(vmap, table_id); if (!me) { ifindex = -ENODEV; goto unlock; } ifindex = vrf_map_elem_get_vrf_ifindex(me); unlock: vrf_map_unlock(vmap); return ifindex; } /* by default VRF devices do not have a qdisc and are expected * to be created with only a single queue. */ static bool qdisc_tx_is_default(const struct net_device *dev) { struct netdev_queue *txq; struct Qdisc *qdisc; if (dev->num_tx_queues > 1) return false; txq = netdev_get_tx_queue(dev, 0); qdisc = rcu_access_pointer(txq->qdisc); return !qdisc->enqueue; } /* Local traffic destined to local address. Reinsert the packet to rx * path, similar to loopback handling. 
*/ static int vrf_local_xmit(struct sk_buff *skb, struct net_device *dev, struct dst_entry *dst) { int len = skb->len; skb_orphan(skb); skb_dst_set(skb, dst); /* set pkt_type to avoid skb hitting packet taps twice - * once on Tx and again in Rx processing */ skb->pkt_type = PACKET_LOOPBACK; skb->protocol = eth_type_trans(skb, dev); if (likely(__netif_rx(skb) == NET_RX_SUCCESS)) vrf_rx_stats(dev, len); else this_cpu_inc(dev->dstats->rx_drps); return NETDEV_TX_OK; } static void vrf_nf_set_untracked(struct sk_buff *skb) { if (skb_get_nfct(skb) == 0) nf_ct_set(skb, NULL, IP_CT_UNTRACKED); } static void vrf_nf_reset_ct(struct sk_buff *skb) { if (skb_get_nfct(skb) == IP_CT_UNTRACKED) nf_reset_ct(skb); } #if IS_ENABLED(CONFIG_IPV6) static int vrf_ip6_local_out(struct net *net, struct sock *sk, struct sk_buff *skb) { int err; vrf_nf_reset_ct(skb); err = nf_hook(NFPROTO_IPV6, NF_INET_LOCAL_OUT, net, sk, skb, NULL, skb_dst(skb)->dev, dst_output); if (likely(err == 1)) err = dst_output(net, sk, skb); return err; } static netdev_tx_t vrf_process_v6_outbound(struct sk_buff *skb, struct net_device *dev) { const struct ipv6hdr *iph; struct net *net = dev_net(skb->dev); struct flowi6 fl6; int ret = NET_XMIT_DROP; struct dst_entry *dst; struct dst_entry *dst_null = &net->ipv6.ip6_null_entry->dst; if (!pskb_may_pull(skb, ETH_HLEN + sizeof(struct ipv6hdr))) goto err; iph = ipv6_hdr(skb); memset(&fl6, 0, sizeof(fl6)); /* needed to match OIF rule */ fl6.flowi6_l3mdev = dev->ifindex; fl6.flowi6_iif = LOOPBACK_IFINDEX; fl6.daddr = iph->daddr; fl6.saddr = iph->saddr; fl6.flowlabel = ip6_flowinfo(iph); fl6.flowi6_mark = skb->mark; fl6.flowi6_proto = iph->nexthdr; dst = ip6_dst_lookup_flow(net, NULL, &fl6, NULL); if (IS_ERR(dst) || dst == dst_null) goto err; skb_dst_drop(skb); /* if dst.dev is the VRF device again this is locally originated traffic * destined to a local address. Short circuit to Rx path. 
*/ if (dst->dev == dev) return vrf_local_xmit(skb, dev, dst); skb_dst_set(skb, dst); /* strip the ethernet header added for pass through VRF device */ __skb_pull(skb, skb_network_offset(skb)); memset(IP6CB(skb), 0, sizeof(*IP6CB(skb))); ret = vrf_ip6_local_out(net, skb->sk, skb); if (unlikely(net_xmit_eval(ret))) dev->stats.tx_errors++; else ret = NET_XMIT_SUCCESS; return ret; err: vrf_tx_error(dev, skb); return NET_XMIT_DROP; } #else static netdev_tx_t vrf_process_v6_outbound(struct sk_buff *skb, struct net_device *dev) { vrf_tx_error(dev, skb); return NET_XMIT_DROP; } #endif /* based on ip_local_out; can't use it b/c the dst is switched pointing to us */ static int vrf_ip_local_out(struct net *net, struct sock *sk, struct sk_buff *skb) { int err; vrf_nf_reset_ct(skb); err = nf_hook(NFPROTO_IPV4, NF_INET_LOCAL_OUT, net, sk, skb, NULL, skb_dst(skb)->dev, dst_output); if (likely(err == 1)) err = dst_output(net, sk, skb); return err; } static netdev_tx_t vrf_process_v4_outbound(struct sk_buff *skb, struct net_device *vrf_dev) { struct iphdr *ip4h; int ret = NET_XMIT_DROP; struct flowi4 fl4; struct net *net = dev_net(vrf_dev); struct rtable *rt; if (!pskb_may_pull(skb, ETH_HLEN + sizeof(struct iphdr))) goto err; ip4h = ip_hdr(skb); memset(&fl4, 0, sizeof(fl4)); /* needed to match OIF rule */ fl4.flowi4_l3mdev = vrf_dev->ifindex; fl4.flowi4_iif = LOOPBACK_IFINDEX; fl4.flowi4_tos = RT_TOS(ip4h->tos); fl4.flowi4_flags = FLOWI_FLAG_ANYSRC; fl4.flowi4_proto = ip4h->protocol; fl4.daddr = ip4h->daddr; fl4.saddr = ip4h->saddr; rt = ip_route_output_flow(net, &fl4, NULL); if (IS_ERR(rt)) goto err; skb_dst_drop(skb); /* if dst.dev is the VRF device again this is locally originated traffic * destined to a local address. Short circuit to Rx path. */ if (rt->dst.dev == vrf_dev) return vrf_local_xmit(skb, vrf_dev, &rt->dst); skb_dst_set(skb, &rt->dst); /* strip the ethernet header added for pass through VRF device */ __skb_pull(skb, skb_network_offset(skb)); if (!ip4h->saddr) { ip4h->saddr = inet_select_addr(skb_dst(skb)->dev, 0, RT_SCOPE_LINK); } memset(IPCB(skb), 0, sizeof(*IPCB(skb))); ret = vrf_ip_local_out(dev_net(skb_dst(skb)->dev), skb->sk, skb); if (unlikely(net_xmit_eval(ret))) vrf_dev->stats.tx_errors++; else ret = NET_XMIT_SUCCESS; out: return ret; err: vrf_tx_error(vrf_dev, skb); goto out; } static netdev_tx_t is_ip_tx_frame(struct sk_buff *skb, struct net_device *dev) { switch (skb->protocol) { case htons(ETH_P_IP): return vrf_process_v4_outbound(skb, dev); case htons(ETH_P_IPV6): return vrf_process_v6_outbound(skb, dev); default: vrf_tx_error(dev, skb); return NET_XMIT_DROP; } } static netdev_tx_t vrf_xmit(struct sk_buff *skb, struct net_device *dev) { int len = skb->len; netdev_tx_t ret = is_ip_tx_frame(skb, dev); if (likely(ret == NET_XMIT_SUCCESS || ret == NET_XMIT_CN)) { struct pcpu_dstats *dstats = this_cpu_ptr(dev->dstats); u64_stats_update_begin(&dstats->syncp); dstats->tx_pkts++; dstats->tx_bytes += len; u64_stats_update_end(&dstats->syncp); } else { this_cpu_inc(dev->dstats->tx_drps); } return ret; } static void vrf_finish_direct(struct sk_buff *skb) { struct net_device *vrf_dev = skb->dev; if (!list_empty(&vrf_dev->ptype_all) && likely(skb_headroom(skb) >= ETH_HLEN)) { struct ethhdr *eth = skb_push(skb, ETH_HLEN); ether_addr_copy(eth->h_source, vrf_dev->dev_addr); eth_zero_addr(eth->h_dest); eth->h_proto = skb->protocol; dev_queue_xmit_nit(skb, vrf_dev); skb_pull(skb, ETH_HLEN); } vrf_nf_reset_ct(skb); } #if IS_ENABLED(CONFIG_IPV6) /* modelled after ip6_finish_output2 */ static int 
vrf_finish_output6(struct net *net, struct sock *sk, struct sk_buff *skb) { struct dst_entry *dst = skb_dst(skb); struct net_device *dev = dst->dev; const struct in6_addr *nexthop; struct neighbour *neigh; int ret; vrf_nf_reset_ct(skb); skb->protocol = htons(ETH_P_IPV6); skb->dev = dev; rcu_read_lock(); nexthop = rt6_nexthop((struct rt6_info *)dst, &ipv6_hdr(skb)->daddr); neigh = __ipv6_neigh_lookup_noref(dst->dev, nexthop); if (unlikely(!neigh)) neigh = __neigh_create(&nd_tbl, nexthop, dst->dev, false); if (!IS_ERR(neigh)) { sock_confirm_neigh(skb, neigh); ret = neigh_output(neigh, skb, false); rcu_read_unlock(); return ret; } rcu_read_unlock(); IP6_INC_STATS(dev_net(dst->dev), ip6_dst_idev(dst), IPSTATS_MIB_OUTNOROUTES); kfree_skb(skb); return -EINVAL; } /* modelled after ip6_output */ static int vrf_output6(struct net *net, struct sock *sk, struct sk_buff *skb) { return NF_HOOK_COND(NFPROTO_IPV6, NF_INET_POST_ROUTING, net, sk, skb, NULL, skb_dst(skb)->dev, vrf_finish_output6, !(IP6CB(skb)->flags & IP6SKB_REROUTED)); } /* set dst on skb to send packet to us via dev_xmit path. Allows * packet to go through device based features such as qdisc, netfilter * hooks and packet sockets with skb->dev set to vrf device. */ static struct sk_buff *vrf_ip6_out_redirect(struct net_device *vrf_dev, struct sk_buff *skb) { struct net_vrf *vrf = netdev_priv(vrf_dev); struct dst_entry *dst = NULL; struct rt6_info *rt6; rcu_read_lock(); rt6 = rcu_dereference(vrf->rt6); if (likely(rt6)) { dst = &rt6->dst; dst_hold(dst); } rcu_read_unlock(); if (unlikely(!dst)) { vrf_tx_error(vrf_dev, skb); return NULL; } skb_dst_drop(skb); skb_dst_set(skb, dst); return skb; } static int vrf_output6_direct_finish(struct net *net, struct sock *sk, struct sk_buff *skb) { vrf_finish_direct(skb); return vrf_ip6_local_out(net, sk, skb); } static int vrf_output6_direct(struct net *net, struct sock *sk, struct sk_buff *skb) { int err = 1; skb->protocol = htons(ETH_P_IPV6); if (!(IPCB(skb)->flags & IPSKB_REROUTED)) err = nf_hook(NFPROTO_IPV6, NF_INET_POST_ROUTING, net, sk, skb, NULL, skb->dev, vrf_output6_direct_finish); if (likely(err == 1)) vrf_finish_direct(skb); return err; } static int vrf_ip6_out_direct_finish(struct net *net, struct sock *sk, struct sk_buff *skb) { int err; err = vrf_output6_direct(net, sk, skb); if (likely(err == 1)) err = vrf_ip6_local_out(net, sk, skb); return err; } static struct sk_buff *vrf_ip6_out_direct(struct net_device *vrf_dev, struct sock *sk, struct sk_buff *skb) { struct net *net = dev_net(vrf_dev); int err; skb->dev = vrf_dev; err = nf_hook(NFPROTO_IPV6, NF_INET_LOCAL_OUT, net, sk, skb, NULL, vrf_dev, vrf_ip6_out_direct_finish); if (likely(err == 1)) err = vrf_output6_direct(net, sk, skb); if (likely(err == 1)) return skb; return NULL; } static struct sk_buff *vrf_ip6_out(struct net_device *vrf_dev, struct sock *sk, struct sk_buff *skb) { /* don't divert link scope packets */ if (rt6_need_strict(&ipv6_hdr(skb)->daddr)) return skb; vrf_nf_set_untracked(skb); if (qdisc_tx_is_default(vrf_dev) || IP6CB(skb)->flags & IP6SKB_XFRM_TRANSFORMED) return vrf_ip6_out_direct(vrf_dev, sk, skb); return vrf_ip6_out_redirect(vrf_dev, skb); } /* holding rtnl */ static void vrf_rt6_release(struct net_device *dev, struct net_vrf *vrf) { struct rt6_info *rt6 = rtnl_dereference(vrf->rt6); struct net *net = dev_net(dev); struct dst_entry *dst; RCU_INIT_POINTER(vrf->rt6, NULL); synchronize_rcu(); /* move dev in dst's to loopback so this VRF device can be deleted * - based on dst_ifdown */ if (rt6) { dst = &rt6->dst; 
netdev_ref_replace(dst->dev, net->loopback_dev, &dst->dev_tracker, GFP_KERNEL); dst->dev = net->loopback_dev; dst_release(dst); } } static int vrf_rt6_create(struct net_device *dev) { int flags = DST_NOPOLICY | DST_NOXFRM; struct net_vrf *vrf = netdev_priv(dev); struct net *net = dev_net(dev); struct rt6_info *rt6; int rc = -ENOMEM; /* IPv6 can be CONFIG enabled and then disabled runtime */ if (!ipv6_mod_enabled()) return 0; vrf->fib6_table = fib6_new_table(net, vrf->tb_id); if (!vrf->fib6_table) goto out; /* create a dst for routing packets out a VRF device */ rt6 = ip6_dst_alloc(net, dev, flags); if (!rt6) goto out; rt6->dst.output = vrf_output6; rcu_assign_pointer(vrf->rt6, rt6); rc = 0; out: return rc; } #else static struct sk_buff *vrf_ip6_out(struct net_device *vrf_dev, struct sock *sk, struct sk_buff *skb) { return skb; } static void vrf_rt6_release(struct net_device *dev, struct net_vrf *vrf) { } static int vrf_rt6_create(struct net_device *dev) { return 0; } #endif /* modelled after ip_finish_output2 */ static int vrf_finish_output(struct net *net, struct sock *sk, struct sk_buff *skb) { struct dst_entry *dst = skb_dst(skb); struct rtable *rt = (struct rtable *)dst; struct net_device *dev = dst->dev; unsigned int hh_len = LL_RESERVED_SPACE(dev); struct neighbour *neigh; bool is_v6gw = false; vrf_nf_reset_ct(skb); /* Be paranoid, rather than too clever. */ if (unlikely(skb_headroom(skb) < hh_len && dev->header_ops)) { skb = skb_expand_head(skb, hh_len); if (!skb) { dev->stats.tx_errors++; return -ENOMEM; } } rcu_read_lock(); neigh = ip_neigh_for_gw(rt, skb, &is_v6gw); if (!IS_ERR(neigh)) { int ret; sock_confirm_neigh(skb, neigh); /* if crossing protocols, can not use the cached header */ ret = neigh_output(neigh, skb, is_v6gw); rcu_read_unlock(); return ret; } rcu_read_unlock(); vrf_tx_error(skb->dev, skb); return -EINVAL; } static int vrf_output(struct net *net, struct sock *sk, struct sk_buff *skb) { struct net_device *dev = skb_dst(skb)->dev; IP_UPD_PO_STATS(net, IPSTATS_MIB_OUT, skb->len); skb->dev = dev; skb->protocol = htons(ETH_P_IP); return NF_HOOK_COND(NFPROTO_IPV4, NF_INET_POST_ROUTING, net, sk, skb, NULL, dev, vrf_finish_output, !(IPCB(skb)->flags & IPSKB_REROUTED)); } /* set dst on skb to send packet to us via dev_xmit path. Allows * packet to go through device based features such as qdisc, netfilter * hooks and packet sockets with skb->dev set to vrf device. 
*/ static struct sk_buff *vrf_ip_out_redirect(struct net_device *vrf_dev, struct sk_buff *skb) { struct net_vrf *vrf = netdev_priv(vrf_dev); struct dst_entry *dst = NULL; struct rtable *rth; rcu_read_lock(); rth = rcu_dereference(vrf->rth); if (likely(rth)) { dst = &rth->dst; dst_hold(dst); } rcu_read_unlock(); if (unlikely(!dst)) { vrf_tx_error(vrf_dev, skb); return NULL; } skb_dst_drop(skb); skb_dst_set(skb, dst); return skb; } static int vrf_output_direct_finish(struct net *net, struct sock *sk, struct sk_buff *skb) { vrf_finish_direct(skb); return vrf_ip_local_out(net, sk, skb); } static int vrf_output_direct(struct net *net, struct sock *sk, struct sk_buff *skb) { int err = 1; skb->protocol = htons(ETH_P_IP); if (!(IPCB(skb)->flags & IPSKB_REROUTED)) err = nf_hook(NFPROTO_IPV4, NF_INET_POST_ROUTING, net, sk, skb, NULL, skb->dev, vrf_output_direct_finish); if (likely(err == 1)) vrf_finish_direct(skb); return err; } static int vrf_ip_out_direct_finish(struct net *net, struct sock *sk, struct sk_buff *skb) { int err; err = vrf_output_direct(net, sk, skb); if (likely(err == 1)) err = vrf_ip_local_out(net, sk, skb); return err; } static struct sk_buff *vrf_ip_out_direct(struct net_device *vrf_dev, struct sock *sk, struct sk_buff *skb) { struct net *net = dev_net(vrf_dev); int err; skb->dev = vrf_dev; err = nf_hook(NFPROTO_IPV4, NF_INET_LOCAL_OUT, net, sk, skb, NULL, vrf_dev, vrf_ip_out_direct_finish); if (likely(err == 1)) err = vrf_output_direct(net, sk, skb); if (likely(err == 1)) return skb; return NULL; } static struct sk_buff *vrf_ip_out(struct net_device *vrf_dev, struct sock *sk, struct sk_buff *skb) { /* don't divert multicast or local broadcast */ if (ipv4_is_multicast(ip_hdr(skb)->daddr) || ipv4_is_lbcast(ip_hdr(skb)->daddr)) return skb; vrf_nf_set_untracked(skb); if (qdisc_tx_is_default(vrf_dev) || IPCB(skb)->flags & IPSKB_XFRM_TRANSFORMED) return vrf_ip_out_direct(vrf_dev, sk, skb); return vrf_ip_out_redirect(vrf_dev, skb); } /* called with rcu lock held */ static struct sk_buff *vrf_l3_out(struct net_device *vrf_dev, struct sock *sk, struct sk_buff *skb, u16 proto) { switch (proto) { case AF_INET: return vrf_ip_out(vrf_dev, sk, skb); case AF_INET6: return vrf_ip6_out(vrf_dev, sk, skb); } return skb; } /* holding rtnl */ static void vrf_rtable_release(struct net_device *dev, struct net_vrf *vrf) { struct rtable *rth = rtnl_dereference(vrf->rth); struct net *net = dev_net(dev); struct dst_entry *dst; RCU_INIT_POINTER(vrf->rth, NULL); synchronize_rcu(); /* move dev in dst's to loopback so this VRF device can be deleted * - based on dst_ifdown */ if (rth) { dst = &rth->dst; netdev_ref_replace(dst->dev, net->loopback_dev, &dst->dev_tracker, GFP_KERNEL); dst->dev = net->loopback_dev; dst_release(dst); } } static int vrf_rtable_create(struct net_device *dev) { struct net_vrf *vrf = netdev_priv(dev); struct rtable *rth; if (!fib_new_table(dev_net(dev), vrf->tb_id)) return -ENOMEM; /* create a dst for routing packets out through a VRF device */ rth = rt_dst_alloc(dev, 0, RTN_UNICAST, 1); if (!rth) return -ENOMEM; rth->dst.output = vrf_output; rcu_assign_pointer(vrf->rth, rth); return 0; } /**************************** device handling ********************/ /* cycle interface to flush neighbor cache and move routes across tables */ static void cycle_netdev(struct net_device *dev, struct netlink_ext_ack *extack) { unsigned int flags = dev->flags; int ret; if (!netif_running(dev)) return; ret = dev_change_flags(dev, flags & ~IFF_UP, extack); if (ret >= 0) ret = dev_change_flags(dev, flags, 
extack); if (ret < 0) { netdev_err(dev, "Failed to cycle device %s; route tables might be wrong!\n", dev->name); } } static int do_vrf_add_slave(struct net_device *dev, struct net_device *port_dev, struct netlink_ext_ack *extack) { int ret; /* do not allow loopback device to be enslaved to a VRF. * The vrf device acts as the loopback for the vrf. */ if (port_dev == dev_net(dev)->loopback_dev) { NL_SET_ERR_MSG(extack, "Can not enslave loopback device to a VRF"); return -EOPNOTSUPP; } port_dev->priv_flags |= IFF_L3MDEV_SLAVE; ret = netdev_master_upper_dev_link(port_dev, dev, NULL, NULL, extack); if (ret < 0) goto err; cycle_netdev(port_dev, extack); return 0; err: port_dev->priv_flags &= ~IFF_L3MDEV_SLAVE; return ret; } static int vrf_add_slave(struct net_device *dev, struct net_device *port_dev, struct netlink_ext_ack *extack) { if (netif_is_l3_master(port_dev)) { NL_SET_ERR_MSG(extack, "Can not enslave an L3 master device to a VRF"); return -EINVAL; } if (netif_is_l3_slave(port_dev)) return -EINVAL; return do_vrf_add_slave(dev, port_dev, extack); } /* inverse of do_vrf_add_slave */ static int do_vrf_del_slave(struct net_device *dev, struct net_device *port_dev) { netdev_upper_dev_unlink(port_dev, dev); port_dev->priv_flags &= ~IFF_L3MDEV_SLAVE; cycle_netdev(port_dev, NULL); return 0; } static int vrf_del_slave(struct net_device *dev, struct net_device *port_dev) { return do_vrf_del_slave(dev, port_dev); } static void vrf_dev_uninit(struct net_device *dev) { struct net_vrf *vrf = netdev_priv(dev); vrf_rtable_release(dev, vrf); vrf_rt6_release(dev, vrf); free_percpu(dev->dstats); dev->dstats = NULL; } static int vrf_dev_init(struct net_device *dev) { struct net_vrf *vrf = netdev_priv(dev); dev->dstats = netdev_alloc_pcpu_stats(struct pcpu_dstats); if (!dev->dstats) goto out_nomem; /* create the default dst which points back to us */ if (vrf_rtable_create(dev) != 0) goto out_stats; if (vrf_rt6_create(dev) != 0) goto out_rth; dev->flags = IFF_MASTER | IFF_NOARP; /* similarly, oper state is irrelevant; set to up to avoid confusion */ dev->operstate = IF_OPER_UP; netdev_lockdep_set_classes(dev); return 0; out_rth: vrf_rtable_release(dev, vrf); out_stats: free_percpu(dev->dstats); dev->dstats = NULL; out_nomem: return -ENOMEM; } static const struct net_device_ops vrf_netdev_ops = { .ndo_init = vrf_dev_init, .ndo_uninit = vrf_dev_uninit, .ndo_start_xmit = vrf_xmit, .ndo_set_mac_address = eth_mac_addr, .ndo_get_stats64 = vrf_get_stats64, .ndo_add_slave = vrf_add_slave, .ndo_del_slave = vrf_del_slave, }; static u32 vrf_fib_table(const struct net_device *dev) { struct net_vrf *vrf = netdev_priv(dev); return vrf->tb_id; } static int vrf_rcv_finish(struct net *net, struct sock *sk, struct sk_buff *skb) { kfree_skb(skb); return 0; } static struct sk_buff *vrf_rcv_nfhook(u8 pf, unsigned int hook, struct sk_buff *skb, struct net_device *dev) { struct net *net = dev_net(dev); if (nf_hook(pf, hook, net, NULL, skb, dev, NULL, vrf_rcv_finish) != 1) skb = NULL; /* kfree_skb(skb) handled by nf code */ return skb; } static int vrf_prepare_mac_header(struct sk_buff *skb, struct net_device *vrf_dev, u16 proto) { struct ethhdr *eth; int err; /* in general, we do not know if there is enough space in the head of * the packet for hosting the mac header. 
*/ err = skb_cow_head(skb, LL_RESERVED_SPACE(vrf_dev)); if (unlikely(err)) /* no space in the skb head */ return -ENOBUFS; __skb_push(skb, ETH_HLEN); eth = (struct ethhdr *)skb->data; skb_reset_mac_header(skb); skb_reset_mac_len(skb); /* we set the ethernet destination and the source addresses to the * address of the VRF device. */ ether_addr_copy(eth->h_dest, vrf_dev->dev_addr); ether_addr_copy(eth->h_source, vrf_dev->dev_addr); eth->h_proto = htons(proto); /* the destination address of the Ethernet frame corresponds to the * address set on the VRF interface; therefore, the packet is intended * to be processed locally. */ skb->protocol = eth->h_proto; skb->pkt_type = PACKET_HOST; skb_postpush_rcsum(skb, skb->data, ETH_HLEN); skb_pull_inline(skb, ETH_HLEN); return 0; } /* prepare and add the mac header to the packet if it was not set previously. * In this way, packet sniffers such as tcpdump can parse the packet correctly. * If the mac header was already set, the original mac header is left * untouched and the function returns immediately. */ static int vrf_add_mac_header_if_unset(struct sk_buff *skb, struct net_device *vrf_dev, u16 proto, struct net_device *orig_dev) { if (skb_mac_header_was_set(skb) && dev_has_header(orig_dev)) return 0; return vrf_prepare_mac_header(skb, vrf_dev, proto); } #if IS_ENABLED(CONFIG_IPV6) /* neighbor handling is done with actual device; do not want * to flip skb->dev for those ndisc packets. This really fails * for multiple next protocols (e.g., NEXTHDR_HOP). But it is * a start. */ static bool ipv6_ndisc_frame(const struct sk_buff *skb) { const struct ipv6hdr *iph = ipv6_hdr(skb); bool rc = false; if (iph->nexthdr == NEXTHDR_ICMP) { const struct icmp6hdr *icmph; struct icmp6hdr _icmph; icmph = skb_header_pointer(skb, sizeof(*iph), sizeof(_icmph), &_icmph); if (!icmph) goto out; switch (icmph->icmp6_type) { case NDISC_ROUTER_SOLICITATION: case NDISC_ROUTER_ADVERTISEMENT: case NDISC_NEIGHBOUR_SOLICITATION: case NDISC_NEIGHBOUR_ADVERTISEMENT: case NDISC_REDIRECT: rc = true; break; } } out: return rc; } static struct rt6_info *vrf_ip6_route_lookup(struct net *net, const struct net_device *dev, struct flowi6 *fl6, int ifindex, const struct sk_buff *skb, int flags) { struct net_vrf *vrf = netdev_priv(dev); return ip6_pol_route(net, vrf->fib6_table, ifindex, fl6, skb, flags); } static void vrf_ip6_input_dst(struct sk_buff *skb, struct net_device *vrf_dev, int ifindex) { const struct ipv6hdr *iph = ipv6_hdr(skb); struct flowi6 fl6 = { .flowi6_iif = ifindex, .flowi6_mark = skb->mark, .flowi6_proto = iph->nexthdr, .daddr = iph->daddr, .saddr = iph->saddr, .flowlabel = ip6_flowinfo(iph), }; struct net *net = dev_net(vrf_dev); struct rt6_info *rt6; rt6 = vrf_ip6_route_lookup(net, vrf_dev, &fl6, ifindex, skb, RT6_LOOKUP_F_HAS_SADDR | RT6_LOOKUP_F_IFACE); if (unlikely(!rt6)) return; if (unlikely(&rt6->dst == &net->ipv6.ip6_null_entry->dst)) return; skb_dst_set(skb, &rt6->dst); } static struct sk_buff *vrf_ip6_rcv(struct net_device *vrf_dev, struct sk_buff *skb) { int orig_iif = skb->skb_iif; bool need_strict = rt6_need_strict(&ipv6_hdr(skb)->daddr); bool is_ndisc = ipv6_ndisc_frame(skb); /* loopback, multicast & non-ND link-local traffic; do not push through * packet taps again. Reset pkt_type for upper layers to process skb. * For non-loopback strict packets, determine the dst using the original * ifindex. 
*/ if (skb->pkt_type == PACKET_LOOPBACK || (need_strict && !is_ndisc)) { skb->dev = vrf_dev; skb->skb_iif = vrf_dev->ifindex; IP6CB(skb)->flags |= IP6SKB_L3SLAVE; if (skb->pkt_type == PACKET_LOOPBACK) skb->pkt_type = PACKET_HOST; else vrf_ip6_input_dst(skb, vrf_dev, orig_iif); goto out; } /* if packet is NDISC then keep the ingress interface */ if (!is_ndisc) { struct net_device *orig_dev = skb->dev; vrf_rx_stats(vrf_dev, skb->len); skb->dev = vrf_dev; skb->skb_iif = vrf_dev->ifindex; if (!list_empty(&vrf_dev->ptype_all)) { int err; err = vrf_add_mac_header_if_unset(skb, vrf_dev, ETH_P_IPV6, orig_dev); if (likely(!err)) { skb_push(skb, skb->mac_len); dev_queue_xmit_nit(skb, vrf_dev); skb_pull(skb, skb->mac_len); } } IP6CB(skb)->flags |= IP6SKB_L3SLAVE; } if (need_strict) vrf_ip6_input_dst(skb, vrf_dev, orig_iif); skb = vrf_rcv_nfhook(NFPROTO_IPV6, NF_INET_PRE_ROUTING, skb, vrf_dev); out: return skb; } #else static struct sk_buff *vrf_ip6_rcv(struct net_device *vrf_dev, struct sk_buff *skb) { return skb; } #endif static struct sk_buff *vrf_ip_rcv(struct net_device *vrf_dev, struct sk_buff *skb) { struct net_device *orig_dev = skb->dev; skb->dev = vrf_dev; skb->skb_iif = vrf_dev->ifindex; IPCB(skb)->flags |= IPSKB_L3SLAVE; if (ipv4_is_multicast(ip_hdr(skb)->daddr)) goto out; /* loopback traffic; do not push through packet taps again. * Reset pkt_type for upper layers to process skb */ if (skb->pkt_type == PACKET_LOOPBACK) { skb->pkt_type = PACKET_HOST; goto out; } vrf_rx_stats(vrf_dev, skb->len); if (!list_empty(&vrf_dev->ptype_all)) { int err; err = vrf_add_mac_header_if_unset(skb, vrf_dev, ETH_P_IP, orig_dev); if (likely(!err)) { skb_push(skb, skb->mac_len); dev_queue_xmit_nit(skb, vrf_dev); skb_pull(skb, skb->mac_len); } } skb = vrf_rcv_nfhook(NFPROTO_IPV4, NF_INET_PRE_ROUTING, skb, vrf_dev); out: return skb; } /* called with rcu lock held */ static struct sk_buff *vrf_l3_rcv(struct net_device *vrf_dev, struct sk_buff *skb, u16 proto) { switch (proto) { case AF_INET: return vrf_ip_rcv(vrf_dev, skb); case AF_INET6: return vrf_ip6_rcv(vrf_dev, skb); } return skb; } #if IS_ENABLED(CONFIG_IPV6) /* send to link-local or multicast address via interface enslaved to * VRF device. Force lookup to VRF table without changing flow struct * Note: Caller to this function must hold rcu_read_lock() and no refcnt * is taken on the dst by this function. 
*/ static struct dst_entry *vrf_link_scope_lookup(const struct net_device *dev, struct flowi6 *fl6) { struct net *net = dev_net(dev); int flags = RT6_LOOKUP_F_IFACE | RT6_LOOKUP_F_DST_NOREF; struct dst_entry *dst = NULL; struct rt6_info *rt; /* VRF device does not have a link-local address and * sending packets to link-local or mcast addresses over * a VRF device does not make sense */ if (fl6->flowi6_oif == dev->ifindex) { dst = &net->ipv6.ip6_null_entry->dst; return dst; } if (!ipv6_addr_any(&fl6->saddr)) flags |= RT6_LOOKUP_F_HAS_SADDR; rt = vrf_ip6_route_lookup(net, dev, fl6, fl6->flowi6_oif, NULL, flags); if (rt) dst = &rt->dst; return dst; } #endif static const struct l3mdev_ops vrf_l3mdev_ops = { .l3mdev_fib_table = vrf_fib_table, .l3mdev_l3_rcv = vrf_l3_rcv, .l3mdev_l3_out = vrf_l3_out, #if IS_ENABLED(CONFIG_IPV6) .l3mdev_link_scope_lookup = vrf_link_scope_lookup, #endif }; static void vrf_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) { strscpy(info->driver, DRV_NAME, sizeof(info->driver)); strscpy(info->version, DRV_VERSION, sizeof(info->version)); } static const struct ethtool_ops vrf_ethtool_ops = { .get_drvinfo = vrf_get_drvinfo, }; static inline size_t vrf_fib_rule_nl_size(void) { size_t sz; sz = NLMSG_ALIGN(sizeof(struct fib_rule_hdr)); sz += nla_total_size(sizeof(u8)); /* FRA_L3MDEV */ sz += nla_total_size(sizeof(u32)); /* FRA_PRIORITY */ sz += nla_total_size(sizeof(u8)); /* FRA_PROTOCOL */ return sz; } static int vrf_fib_rule(const struct net_device *dev, __u8 family, bool add_it) { struct fib_rule_hdr *frh; struct nlmsghdr *nlh; struct sk_buff *skb; int err; if ((family == AF_INET6 || family == RTNL_FAMILY_IP6MR) && !ipv6_mod_enabled()) return 0; skb = nlmsg_new(vrf_fib_rule_nl_size(), GFP_KERNEL); if (!skb) return -ENOMEM; nlh = nlmsg_put(skb, 0, 0, 0, sizeof(*frh), 0); if (!nlh) goto nla_put_failure; /* rule only needs to appear once */ nlh->nlmsg_flags |= NLM_F_EXCL; frh = nlmsg_data(nlh); memset(frh, 0, sizeof(*frh)); frh->family = family; frh->action = FR_ACT_TO_TBL; if (nla_put_u8(skb, FRA_PROTOCOL, RTPROT_KERNEL)) goto nla_put_failure; if (nla_put_u8(skb, FRA_L3MDEV, 1)) goto nla_put_failure; if (nla_put_u32(skb, FRA_PRIORITY, FIB_RULE_PREF)) goto nla_put_failure; nlmsg_end(skb, nlh); /* fib_nl_{new,del}rule handling looks for net from skb->sk */ skb->sk = dev_net(dev)->rtnl; if (add_it) { err = fib_nl_newrule(skb, nlh, NULL); if (err == -EEXIST) err = 0; } else { err = fib_nl_delrule(skb, nlh, NULL); if (err == -ENOENT) err = 0; } nlmsg_free(skb); return err; nla_put_failure: nlmsg_free(skb); return -EMSGSIZE; } static int vrf_add_fib_rules(const struct net_device *dev) { int err; err = vrf_fib_rule(dev, AF_INET, true); if (err < 0) goto out_err; err = vrf_fib_rule(dev, AF_INET6, true); if (err < 0) goto ipv6_err; #if IS_ENABLED(CONFIG_IP_MROUTE_MULTIPLE_TABLES) err = vrf_fib_rule(dev, RTNL_FAMILY_IPMR, true); if (err < 0) goto ipmr_err; #endif #if IS_ENABLED(CONFIG_IPV6_MROUTE_MULTIPLE_TABLES) err = vrf_fib_rule(dev, RTNL_FAMILY_IP6MR, true); if (err < 0) goto ip6mr_err; #endif return 0; #if IS_ENABLED(CONFIG_IPV6_MROUTE_MULTIPLE_TABLES) ip6mr_err: vrf_fib_rule(dev, RTNL_FAMILY_IPMR, false); #endif #if IS_ENABLED(CONFIG_IP_MROUTE_MULTIPLE_TABLES) ipmr_err: vrf_fib_rule(dev, AF_INET6, false); #endif ipv6_err: vrf_fib_rule(dev, AF_INET, false); out_err: netdev_err(dev, "Failed to add FIB rules.\n"); return err; } static void vrf_setup(struct net_device *dev) { ether_setup(dev); /* Initialize the device structure. 
*/ dev->netdev_ops = &vrf_netdev_ops; dev->l3mdev_ops = &vrf_l3mdev_ops; dev->ethtool_ops = &vrf_ethtool_ops; dev->needs_free_netdev = true; /* Fill in device structure with ethernet-generic values. */ eth_hw_addr_random(dev); /* don't acquire vrf device's netif_tx_lock when transmitting */ dev->features |= NETIF_F_LLTX; /* don't allow vrf devices to change network namespaces. */ dev->features |= NETIF_F_NETNS_LOCAL; /* does not make sense for a VLAN to be added to a vrf device */ dev->features |= NETIF_F_VLAN_CHALLENGED; /* enable offload features */ dev->features |= NETIF_F_GSO_SOFTWARE; dev->features |= NETIF_F_RXCSUM | NETIF_F_HW_CSUM | NETIF_F_SCTP_CRC; dev->features |= NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_HIGHDMA; dev->hw_features = dev->features; dev->hw_enc_features = dev->features; /* default to no qdisc; user can add if desired */ dev->priv_flags |= IFF_NO_QUEUE; dev->priv_flags |= IFF_NO_RX_HANDLER; dev->priv_flags |= IFF_LIVE_ADDR_CHANGE; /* VRF devices do not care about MTU, but if the MTU is set * too low then the ipv4 and ipv6 protocols are disabled * which breaks networking. */ dev->min_mtu = IPV6_MIN_MTU; dev->max_mtu = IP6_MAX_MTU; dev->mtu = dev->max_mtu; } static int vrf_validate(struct nlattr *tb[], struct nlattr *data[], struct netlink_ext_ack *extack) { if (tb[IFLA_ADDRESS]) { if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN) { NL_SET_ERR_MSG(extack, "Invalid hardware address"); return -EINVAL; } if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS]))) { NL_SET_ERR_MSG(extack, "Invalid hardware address"); return -EADDRNOTAVAIL; } } return 0; } static void vrf_dellink(struct net_device *dev, struct list_head *head) { struct net_device *port_dev; struct list_head *iter; netdev_for_each_lower_dev(dev, port_dev, iter) vrf_del_slave(dev, port_dev); vrf_map_unregister_dev(dev); unregister_netdevice_queue(dev, head); } static int vrf_newlink(struct net *src_net, struct net_device *dev, struct nlattr *tb[], struct nlattr *data[], struct netlink_ext_ack *extack) { struct net_vrf *vrf = netdev_priv(dev); struct netns_vrf *nn_vrf; bool *add_fib_rules; struct net *net; int err; if (!data || !data[IFLA_VRF_TABLE]) { NL_SET_ERR_MSG(extack, "VRF table id is missing"); return -EINVAL; } vrf->tb_id = nla_get_u32(data[IFLA_VRF_TABLE]); if (vrf->tb_id == RT_TABLE_UNSPEC) { NL_SET_ERR_MSG_ATTR(extack, data[IFLA_VRF_TABLE], "Invalid VRF table id"); return -EINVAL; } dev->priv_flags |= IFF_L3MDEV_MASTER; err = register_netdevice(dev); if (err) goto out; /* mapping between table_id and vrf; * note: such binding could not be done in the dev init function * because dev->ifindex id is not available yet. 
*/ vrf->ifindex = dev->ifindex; err = vrf_map_register_dev(dev, extack); if (err) { unregister_netdevice(dev); goto out; } net = dev_net(dev); nn_vrf = net_generic(net, vrf_net_id); add_fib_rules = &nn_vrf->add_fib_rules; if (*add_fib_rules) { err = vrf_add_fib_rules(dev); if (err) { vrf_map_unregister_dev(dev); unregister_netdevice(dev); goto out; } *add_fib_rules = false; } out: return err; } static size_t vrf_nl_getsize(const struct net_device *dev) { return nla_total_size(sizeof(u32)); /* IFLA_VRF_TABLE */ } static int vrf_fillinfo(struct sk_buff *skb, const struct net_device *dev) { struct net_vrf *vrf = netdev_priv(dev); return nla_put_u32(skb, IFLA_VRF_TABLE, vrf->tb_id); } static size_t vrf_get_slave_size(const struct net_device *bond_dev, const struct net_device *slave_dev) { return nla_total_size(sizeof(u32)); /* IFLA_VRF_PORT_TABLE */ } static int vrf_fill_slave_info(struct sk_buff *skb, const struct net_device *vrf_dev, const struct net_device *slave_dev) { struct net_vrf *vrf = netdev_priv(vrf_dev); if (nla_put_u32(skb, IFLA_VRF_PORT_TABLE, vrf->tb_id)) return -EMSGSIZE; return 0; } static const struct nla_policy vrf_nl_policy[IFLA_VRF_MAX + 1] = { [IFLA_VRF_TABLE] = { .type = NLA_U32 }, }; static struct rtnl_link_ops vrf_link_ops __read_mostly = { .kind = DRV_NAME, .priv_size = sizeof(struct net_vrf), .get_size = vrf_nl_getsize, .policy = vrf_nl_policy, .validate = vrf_validate, .fill_info = vrf_fillinfo, .get_slave_size = vrf_get_slave_size, .fill_slave_info = vrf_fill_slave_info, .newlink = vrf_newlink, .dellink = vrf_dellink, .setup = vrf_setup, .maxtype = IFLA_VRF_MAX, }; static int vrf_device_event(struct notifier_block *unused, unsigned long event, void *ptr) { struct net_device *dev = netdev_notifier_info_to_dev(ptr); /* only care about unregister events to drop slave references */ if (event == NETDEV_UNREGISTER) { struct net_device *vrf_dev; if (!netif_is_l3_slave(dev)) goto out; vrf_dev = netdev_master_upper_dev_get(dev); vrf_del_slave(vrf_dev, dev); } out: return NOTIFY_DONE; } static struct notifier_block vrf_notifier_block __read_mostly = { .notifier_call = vrf_device_event, }; static int vrf_map_init(struct vrf_map *vmap) { spin_lock_init(&vmap->vmap_lock); hash_init(vmap->ht); vmap->strict_mode = false; return 0; } #ifdef CONFIG_SYSCTL static bool vrf_strict_mode(struct vrf_map *vmap) { bool strict_mode; vrf_map_lock(vmap); strict_mode = vmap->strict_mode; vrf_map_unlock(vmap); return strict_mode; } static int vrf_strict_mode_change(struct vrf_map *vmap, bool new_mode) { bool *cur_mode; int res = 0; vrf_map_lock(vmap); cur_mode = &vmap->strict_mode; if (*cur_mode == new_mode) goto unlock; if (*cur_mode) { /* disable strict mode */ *cur_mode = false; } else { if (vmap->shared_tables) { /* we cannot allow strict_mode because there are some * vrfs that share one or more tables. */ res = -EBUSY; goto unlock; } /* no tables are shared among vrfs, so we can go back * to 1:1 association between a vrf with its table. 
*/ *cur_mode = true; } unlock: vrf_map_unlock(vmap); return res; } static int vrf_shared_table_handler(struct ctl_table *table, int write, void *buffer, size_t *lenp, loff_t *ppos) { struct net *net = (struct net *)table->extra1; struct vrf_map *vmap = netns_vrf_map(net); int proc_strict_mode = 0; struct ctl_table tmp = { .procname = table->procname, .data = &proc_strict_mode, .maxlen = sizeof(int), .mode = table->mode, .extra1 = SYSCTL_ZERO, .extra2 = SYSCTL_ONE, }; int ret; if (!write) proc_strict_mode = vrf_strict_mode(vmap); ret = proc_dointvec_minmax(&tmp, write, buffer, lenp, ppos); if (write && ret == 0) ret = vrf_strict_mode_change(vmap, (bool)proc_strict_mode); return ret; } static const struct ctl_table vrf_table[] = { { .procname = "strict_mode", .data = NULL, .maxlen = sizeof(int), .mode = 0644, .proc_handler = vrf_shared_table_handler, /* set by the vrf_netns_init */ .extra1 = NULL, }, { }, }; static int vrf_netns_init_sysctl(struct net *net, struct netns_vrf *nn_vrf) { struct ctl_table *table; table = kmemdup(vrf_table, sizeof(vrf_table), GFP_KERNEL); if (!table) return -ENOMEM; /* init the extra1 parameter with the reference to current netns */ table[0].extra1 = net; nn_vrf->ctl_hdr = register_net_sysctl_sz(net, "net/vrf", table, ARRAY_SIZE(vrf_table)); if (!nn_vrf->ctl_hdr) { kfree(table); return -ENOMEM; } return 0; } static void vrf_netns_exit_sysctl(struct net *net) { struct netns_vrf *nn_vrf = net_generic(net, vrf_net_id); struct ctl_table *table; table = nn_vrf->ctl_hdr->ctl_table_arg; unregister_net_sysctl_table(nn_vrf->ctl_hdr); kfree(table); } #else static int vrf_netns_init_sysctl(struct net *net, struct netns_vrf *nn_vrf) { return 0; } static void vrf_netns_exit_sysctl(struct net *net) { } #endif /* Initialize per network namespace state */ static int __net_init vrf_netns_init(struct net *net) { struct netns_vrf *nn_vrf = net_generic(net, vrf_net_id); nn_vrf->add_fib_rules = true; vrf_map_init(&nn_vrf->vmap); return vrf_netns_init_sysctl(net, nn_vrf); } static void __net_exit vrf_netns_exit(struct net *net) { vrf_netns_exit_sysctl(net); } static struct pernet_operations vrf_net_ops __net_initdata = { .init = vrf_netns_init, .exit = vrf_netns_exit, .id = &vrf_net_id, .size = sizeof(struct netns_vrf), }; static int __init vrf_init_module(void) { int rc; register_netdevice_notifier(&vrf_notifier_block); rc = register_pernet_subsys(&vrf_net_ops); if (rc < 0) goto error; rc = l3mdev_table_lookup_register(L3MDEV_TYPE_VRF, vrf_ifindex_lookup_by_table_id); if (rc < 0) goto unreg_pernet; rc = rtnl_link_register(&vrf_link_ops); if (rc < 0) goto table_lookup_unreg; return 0; table_lookup_unreg: l3mdev_table_lookup_unregister(L3MDEV_TYPE_VRF, vrf_ifindex_lookup_by_table_id); unreg_pernet: unregister_pernet_subsys(&vrf_net_ops); error: unregister_netdevice_notifier(&vrf_notifier_block); return rc; } module_init(vrf_init_module); MODULE_AUTHOR("Shrijeet Mukherjee, David Ahern"); MODULE_DESCRIPTION("Device driver to instantiate VRF domains"); MODULE_LICENSE("GPL"); MODULE_ALIAS_RTNL_LINK(DRV_NAME); MODULE_VERSION(DRV_VERSION);
linux-master
drivers/net/vrf.c
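Editorial note on usage: the vrf driver above installs a single l3mdev FIB rule per namespace (vrf_add_fib_rules()), so traffic from sockets scoped to a VRF is looked up in that VRF's table. As a usage illustration only, not part of the driver, here is a minimal userspace sketch of how an application scopes a socket to a VRF with SO_BINDTODEVICE. The device name "vrf-blue" is an assumption, the VRF device must already exist, and the setsockopt() call typically requires CAP_NET_RAW.

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/socket.h>

int main(void)
{
	const char ifname[] = "vrf-blue";	/* hypothetical VRF device name */
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	if (fd < 0) {
		perror("socket");
		return 1;
	}

	/* Scope the socket to the VRF; subsequent route lookups for this
	 * socket use the VRF's FIB table via the l3mdev rule that
	 * vrf_add_fib_rules() installed. Usually needs CAP_NET_RAW. */
	if (setsockopt(fd, SOL_SOCKET, SO_BINDTODEVICE,
		       ifname, strlen(ifname)) < 0) {
		perror("setsockopt(SO_BINDTODEVICE)");
		close(fd);
		return 1;
	}

	printf("socket bound to %s\n", ifname);
	close(fd);
	return 0;
}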
// SPDX-License-Identifier: GPL-2.0-only #include <linux/etherdevice.h> #include <linux/if_macvlan.h> #include <linux/if_tap.h> #include <linux/if_vlan.h> #include <linux/interrupt.h> #include <linux/nsproxy.h> #include <linux/compat.h> #include <linux/if_tun.h> #include <linux/module.h> #include <linux/skbuff.h> #include <linux/cache.h> #include <linux/sched/signal.h> #include <linux/types.h> #include <linux/slab.h> #include <linux/wait.h> #include <linux/cdev.h> #include <linux/idr.h> #include <linux/fs.h> #include <linux/uio.h> #include <net/net_namespace.h> #include <net/rtnetlink.h> #include <net/sock.h> #include <linux/virtio_net.h> #include <linux/skb_array.h> struct macvtap_dev { struct macvlan_dev vlan; struct tap_dev tap; }; /* * Variables for dealing with macvtaps device numbers. */ static dev_t macvtap_major; static const void *macvtap_net_namespace(const struct device *d) { const struct net_device *dev = to_net_dev(d->parent); return dev_net(dev); } static struct class macvtap_class = { .name = "macvtap", .ns_type = &net_ns_type_operations, .namespace = macvtap_net_namespace, }; static struct cdev macvtap_cdev; #define TUN_OFFLOADS (NETIF_F_HW_CSUM | NETIF_F_TSO_ECN | NETIF_F_TSO | \ NETIF_F_TSO6) static void macvtap_count_tx_dropped(struct tap_dev *tap) { struct macvtap_dev *vlantap = container_of(tap, struct macvtap_dev, tap); struct macvlan_dev *vlan = &vlantap->vlan; this_cpu_inc(vlan->pcpu_stats->tx_dropped); } static void macvtap_count_rx_dropped(struct tap_dev *tap) { struct macvtap_dev *vlantap = container_of(tap, struct macvtap_dev, tap); struct macvlan_dev *vlan = &vlantap->vlan; macvlan_count_rx(vlan, 0, 0, 0); } static void macvtap_update_features(struct tap_dev *tap, netdev_features_t features) { struct macvtap_dev *vlantap = container_of(tap, struct macvtap_dev, tap); struct macvlan_dev *vlan = &vlantap->vlan; vlan->set_features = features; netdev_update_features(vlan->dev); } static int macvtap_newlink(struct net *src_net, struct net_device *dev, struct nlattr *tb[], struct nlattr *data[], struct netlink_ext_ack *extack) { struct macvtap_dev *vlantap = netdev_priv(dev); int err; INIT_LIST_HEAD(&vlantap->tap.queue_list); /* Since macvlan supports all offloads by default, make * tap support all offloads also. */ vlantap->tap.tap_features = TUN_OFFLOADS; /* Register callbacks for rx/tx drops accounting and updating * net_device features */ vlantap->tap.count_tx_dropped = macvtap_count_tx_dropped; vlantap->tap.count_rx_dropped = macvtap_count_rx_dropped; vlantap->tap.update_features = macvtap_update_features; err = netdev_rx_handler_register(dev, tap_handle_frame, &vlantap->tap); if (err) return err; /* Don't put anything that may fail after macvlan_common_newlink * because we can't undo what it does. 
*/ err = macvlan_common_newlink(src_net, dev, tb, data, extack); if (err) { netdev_rx_handler_unregister(dev); return err; } vlantap->tap.dev = vlantap->vlan.dev; return 0; } static void macvtap_dellink(struct net_device *dev, struct list_head *head) { struct macvtap_dev *vlantap = netdev_priv(dev); netdev_rx_handler_unregister(dev); tap_del_queues(&vlantap->tap); macvlan_dellink(dev, head); } static void macvtap_setup(struct net_device *dev) { macvlan_common_setup(dev); dev->tx_queue_len = TUN_READQ_SIZE; } static struct net *macvtap_link_net(const struct net_device *dev) { return dev_net(macvlan_dev_real_dev(dev)); } static struct rtnl_link_ops macvtap_link_ops __read_mostly = { .kind = "macvtap", .setup = macvtap_setup, .newlink = macvtap_newlink, .dellink = macvtap_dellink, .get_link_net = macvtap_link_net, .priv_size = sizeof(struct macvtap_dev), }; static int macvtap_device_event(struct notifier_block *unused, unsigned long event, void *ptr) { struct net_device *dev = netdev_notifier_info_to_dev(ptr); struct macvtap_dev *vlantap; struct device *classdev; dev_t devt; int err; char tap_name[IFNAMSIZ]; if (dev->rtnl_link_ops != &macvtap_link_ops) return NOTIFY_DONE; snprintf(tap_name, IFNAMSIZ, "tap%d", dev->ifindex); vlantap = netdev_priv(dev); switch (event) { case NETDEV_REGISTER: /* Create the device node here after the network device has * been registered but before register_netdevice has * finished running. */ err = tap_get_minor(macvtap_major, &vlantap->tap); if (err) return notifier_from_errno(err); devt = MKDEV(MAJOR(macvtap_major), vlantap->tap.minor); classdev = device_create(&macvtap_class, &dev->dev, devt, dev, "%s", tap_name); if (IS_ERR(classdev)) { tap_free_minor(macvtap_major, &vlantap->tap); return notifier_from_errno(PTR_ERR(classdev)); } err = sysfs_create_link(&dev->dev.kobj, &classdev->kobj, tap_name); if (err) return notifier_from_errno(err); break; case NETDEV_UNREGISTER: /* vlan->minor == 0 if NETDEV_REGISTER above failed */ if (vlantap->tap.minor == 0) break; sysfs_remove_link(&dev->dev.kobj, tap_name); devt = MKDEV(MAJOR(macvtap_major), vlantap->tap.minor); device_destroy(&macvtap_class, devt); tap_free_minor(macvtap_major, &vlantap->tap); break; case NETDEV_CHANGE_TX_QUEUE_LEN: if (tap_queue_resize(&vlantap->tap)) return NOTIFY_BAD; break; } return NOTIFY_DONE; } static struct notifier_block macvtap_notifier_block __read_mostly = { .notifier_call = macvtap_device_event, }; static int __init macvtap_init(void) { int err; err = tap_create_cdev(&macvtap_cdev, &macvtap_major, "macvtap", THIS_MODULE); if (err) goto out1; err = class_register(&macvtap_class); if (err) goto out2; err = register_netdevice_notifier(&macvtap_notifier_block); if (err) goto out3; err = macvlan_link_register(&macvtap_link_ops); if (err) goto out4; return 0; out4: unregister_netdevice_notifier(&macvtap_notifier_block); out3: class_unregister(&macvtap_class); out2: tap_destroy_cdev(macvtap_major, &macvtap_cdev); out1: return err; } module_init(macvtap_init); static void __exit macvtap_exit(void) { rtnl_link_unregister(&macvtap_link_ops); unregister_netdevice_notifier(&macvtap_notifier_block); class_unregister(&macvtap_class); tap_destroy_cdev(macvtap_major, &macvtap_cdev); } module_exit(macvtap_exit); MODULE_ALIAS_RTNL_LINK("macvtap"); MODULE_AUTHOR("Arnd Bergmann <[email protected]>"); MODULE_LICENSE("GPL");
linux-master
drivers/net/macvtap.c
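Editorial note on the composition pattern: macvtap.c above embeds both roles in one netdev private area (struct macvtap_dev holds a macvlan_dev and a tap_dev side by side) and its callbacks recover the outer structure from the inner tap_dev pointer with container_of(), as in macvtap_count_tx_dropped(). The following standalone, userspace-compilable sketch reproduces just that pattern; fake_vlan, fake_tap and fake_macvtap are illustrative stand-ins, not kernel structures.

#include <stddef.h>
#include <stdio.h>

/* Userspace re-creation of the kernel's container_of() helper. */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

/* Stand-ins for struct macvlan_dev and struct tap_dev. */
struct fake_vlan { int tx_dropped; };
struct fake_tap  { int minor; };

/* Mirrors struct macvtap_dev: both roles live in one private area, so a
 * pointer to either member can recover the enclosing object. */
struct fake_macvtap {
	struct fake_vlan vlan;
	struct fake_tap  tap;
};

/* Same shape as macvtap_count_tx_dropped(): the callback only receives
 * the tap pointer and walks back to the enclosing device to update the
 * vlan-side counter. */
static void count_tx_dropped(struct fake_tap *tap)
{
	struct fake_macvtap *dev = container_of(tap, struct fake_macvtap, tap);

	dev->vlan.tx_dropped++;
}

int main(void)
{
	struct fake_macvtap dev = { .vlan = { 0 }, .tap = { .minor = 1 } };

	count_tx_dropped(&dev.tap);
	printf("tx_dropped = %d\n", dev.vlan.tx_dropped);
	return 0;
}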
// SPDX-License-Identifier: GPL-2.0-only #include <linux/etherdevice.h> #include <linux/if_tap.h> #include <linux/if_vlan.h> #include <linux/interrupt.h> #include <linux/nsproxy.h> #include <linux/compat.h> #include <linux/if_tun.h> #include <linux/module.h> #include <linux/skbuff.h> #include <linux/cache.h> #include <linux/sched/signal.h> #include <linux/types.h> #include <linux/slab.h> #include <linux/wait.h> #include <linux/cdev.h> #include <linux/idr.h> #include <linux/fs.h> #include <linux/uio.h> #include <net/gso.h> #include <net/net_namespace.h> #include <net/rtnetlink.h> #include <net/sock.h> #include <net/xdp.h> #include <linux/virtio_net.h> #include <linux/skb_array.h> #define TAP_IFFEATURES (IFF_VNET_HDR | IFF_MULTI_QUEUE) #define TAP_VNET_LE 0x80000000 #define TAP_VNET_BE 0x40000000 #ifdef CONFIG_TUN_VNET_CROSS_LE static inline bool tap_legacy_is_little_endian(struct tap_queue *q) { return q->flags & TAP_VNET_BE ? false : virtio_legacy_is_little_endian(); } static long tap_get_vnet_be(struct tap_queue *q, int __user *sp) { int s = !!(q->flags & TAP_VNET_BE); if (put_user(s, sp)) return -EFAULT; return 0; } static long tap_set_vnet_be(struct tap_queue *q, int __user *sp) { int s; if (get_user(s, sp)) return -EFAULT; if (s) q->flags |= TAP_VNET_BE; else q->flags &= ~TAP_VNET_BE; return 0; } #else static inline bool tap_legacy_is_little_endian(struct tap_queue *q) { return virtio_legacy_is_little_endian(); } static long tap_get_vnet_be(struct tap_queue *q, int __user *argp) { return -EINVAL; } static long tap_set_vnet_be(struct tap_queue *q, int __user *argp) { return -EINVAL; } #endif /* CONFIG_TUN_VNET_CROSS_LE */ static inline bool tap_is_little_endian(struct tap_queue *q) { return q->flags & TAP_VNET_LE || tap_legacy_is_little_endian(q); } static inline u16 tap16_to_cpu(struct tap_queue *q, __virtio16 val) { return __virtio16_to_cpu(tap_is_little_endian(q), val); } static inline __virtio16 cpu_to_tap16(struct tap_queue *q, u16 val) { return __cpu_to_virtio16(tap_is_little_endian(q), val); } static struct proto tap_proto = { .name = "tap", .owner = THIS_MODULE, .obj_size = sizeof(struct tap_queue), }; #define TAP_NUM_DEVS (1U << MINORBITS) static LIST_HEAD(major_list); struct major_info { struct rcu_head rcu; dev_t major; struct idr minor_idr; spinlock_t minor_lock; const char *device_name; struct list_head next; }; #define GOODCOPY_LEN 128 static const struct proto_ops tap_socket_ops; #define RX_OFFLOADS (NETIF_F_GRO | NETIF_F_LRO) #define TAP_FEATURES (NETIF_F_GSO | NETIF_F_SG | NETIF_F_FRAGLIST) static struct tap_dev *tap_dev_get_rcu(const struct net_device *dev) { return rcu_dereference(dev->rx_handler_data); } /* * RCU usage: * The tap_queue and the macvlan_dev are loosely coupled, the * pointers from one to the other can only be read while rcu_read_lock * or rtnl is held. * * Both the file and the macvlan_dev hold a reference on the tap_queue * through sock_hold(&q->sk). When the macvlan_dev goes away first, * q->vlan becomes inaccessible. When the files gets closed, * tap_get_queue() fails. * * There may still be references to the struct sock inside of the * queue from outbound SKBs, but these never reference back to the * file or the dev. The data structure is freed through __sk_free * when both our references and any pending SKBs are gone. 
*/ static int tap_enable_queue(struct tap_dev *tap, struct file *file, struct tap_queue *q) { int err = -EINVAL; ASSERT_RTNL(); if (q->enabled) goto out; err = 0; rcu_assign_pointer(tap->taps[tap->numvtaps], q); q->queue_index = tap->numvtaps; q->enabled = true; tap->numvtaps++; out: return err; } /* Requires RTNL */ static int tap_set_queue(struct tap_dev *tap, struct file *file, struct tap_queue *q) { if (tap->numqueues == MAX_TAP_QUEUES) return -EBUSY; rcu_assign_pointer(q->tap, tap); rcu_assign_pointer(tap->taps[tap->numvtaps], q); sock_hold(&q->sk); q->file = file; q->queue_index = tap->numvtaps; q->enabled = true; file->private_data = q; list_add_tail(&q->next, &tap->queue_list); tap->numvtaps++; tap->numqueues++; return 0; } static int tap_disable_queue(struct tap_queue *q) { struct tap_dev *tap; struct tap_queue *nq; ASSERT_RTNL(); if (!q->enabled) return -EINVAL; tap = rtnl_dereference(q->tap); if (tap) { int index = q->queue_index; BUG_ON(index >= tap->numvtaps); nq = rtnl_dereference(tap->taps[tap->numvtaps - 1]); nq->queue_index = index; rcu_assign_pointer(tap->taps[index], nq); RCU_INIT_POINTER(tap->taps[tap->numvtaps - 1], NULL); q->enabled = false; tap->numvtaps--; } return 0; } /* * The file owning the queue got closed, give up both * the reference that the files holds as well as the * one from the macvlan_dev if that still exists. * * Using the spinlock makes sure that we don't get * to the queue again after destroying it. */ static void tap_put_queue(struct tap_queue *q) { struct tap_dev *tap; rtnl_lock(); tap = rtnl_dereference(q->tap); if (tap) { if (q->enabled) BUG_ON(tap_disable_queue(q)); tap->numqueues--; RCU_INIT_POINTER(q->tap, NULL); sock_put(&q->sk); list_del_init(&q->next); } rtnl_unlock(); synchronize_rcu(); sock_put(&q->sk); } /* * Select a queue based on the rxq of the device on which this packet * arrived. If the incoming device is not mq, calculate a flow hash * to select a queue. If all fails, find the first available queue. * Cache vlan->numvtaps since it can become zero during the execution * of this function. */ static struct tap_queue *tap_get_queue(struct tap_dev *tap, struct sk_buff *skb) { struct tap_queue *queue = NULL; /* Access to taps array is protected by rcu, but access to numvtaps * isn't. Below we use it to lookup a queue, but treat it as a hint * and validate that the result isn't NULL - in case we are * racing against queue removal. */ int numvtaps = READ_ONCE(tap->numvtaps); __u32 rxq; if (!numvtaps) goto out; if (numvtaps == 1) goto single; /* Check if we can use flow to select a queue */ rxq = skb_get_hash(skb); if (rxq) { queue = rcu_dereference(tap->taps[rxq % numvtaps]); goto out; } if (likely(skb_rx_queue_recorded(skb))) { rxq = skb_get_rx_queue(skb); while (unlikely(rxq >= numvtaps)) rxq -= numvtaps; queue = rcu_dereference(tap->taps[rxq]); goto out; } single: queue = rcu_dereference(tap->taps[0]); out: return queue; } /* * The net_device is going away, give up the reference * that it holds on all queues and safely set the pointer * from the queues to NULL. 
*/ void tap_del_queues(struct tap_dev *tap) { struct tap_queue *q, *tmp; ASSERT_RTNL(); list_for_each_entry_safe(q, tmp, &tap->queue_list, next) { list_del_init(&q->next); RCU_INIT_POINTER(q->tap, NULL); if (q->enabled) tap->numvtaps--; tap->numqueues--; sock_put(&q->sk); } BUG_ON(tap->numvtaps); BUG_ON(tap->numqueues); /* guarantee that any future tap_set_queue will fail */ tap->numvtaps = MAX_TAP_QUEUES; } EXPORT_SYMBOL_GPL(tap_del_queues); rx_handler_result_t tap_handle_frame(struct sk_buff **pskb) { struct sk_buff *skb = *pskb; struct net_device *dev = skb->dev; struct tap_dev *tap; struct tap_queue *q; netdev_features_t features = TAP_FEATURES; enum skb_drop_reason drop_reason; tap = tap_dev_get_rcu(dev); if (!tap) return RX_HANDLER_PASS; q = tap_get_queue(tap, skb); if (!q) return RX_HANDLER_PASS; skb_push(skb, ETH_HLEN); /* Apply the forward feature mask so that we perform segmentation * according to users wishes. This only works if VNET_HDR is * enabled. */ if (q->flags & IFF_VNET_HDR) features |= tap->tap_features; if (netif_needs_gso(skb, features)) { struct sk_buff *segs = __skb_gso_segment(skb, features, false); struct sk_buff *next; if (IS_ERR(segs)) { drop_reason = SKB_DROP_REASON_SKB_GSO_SEG; goto drop; } if (!segs) { if (ptr_ring_produce(&q->ring, skb)) { drop_reason = SKB_DROP_REASON_FULL_RING; goto drop; } goto wake_up; } consume_skb(skb); skb_list_walk_safe(segs, skb, next) { skb_mark_not_on_list(skb); if (ptr_ring_produce(&q->ring, skb)) { drop_reason = SKB_DROP_REASON_FULL_RING; kfree_skb_reason(skb, drop_reason); kfree_skb_list_reason(next, drop_reason); break; } } } else { /* If we receive a partial checksum and the tap side * doesn't support checksum offload, compute the checksum. * Note: it doesn't matter which checksum feature to * check, we either support them all or none. */ if (skb->ip_summed == CHECKSUM_PARTIAL && !(features & NETIF_F_CSUM_MASK) && skb_checksum_help(skb)) { drop_reason = SKB_DROP_REASON_SKB_CSUM; goto drop; } if (ptr_ring_produce(&q->ring, skb)) { drop_reason = SKB_DROP_REASON_FULL_RING; goto drop; } } wake_up: wake_up_interruptible_poll(sk_sleep(&q->sk), EPOLLIN | EPOLLRDNORM | EPOLLRDBAND); return RX_HANDLER_CONSUMED; drop: /* Count errors/drops only here, thus don't care about args. */ if (tap->count_rx_dropped) tap->count_rx_dropped(tap); kfree_skb_reason(skb, drop_reason); return RX_HANDLER_CONSUMED; } EXPORT_SYMBOL_GPL(tap_handle_frame); static struct major_info *tap_get_major(int major) { struct major_info *tap_major; list_for_each_entry_rcu(tap_major, &major_list, next) { if (tap_major->major == major) return tap_major; } return NULL; } int tap_get_minor(dev_t major, struct tap_dev *tap) { int retval = -ENOMEM; struct major_info *tap_major; rcu_read_lock(); tap_major = tap_get_major(MAJOR(major)); if (!tap_major) { retval = -EINVAL; goto unlock; } spin_lock(&tap_major->minor_lock); retval = idr_alloc(&tap_major->minor_idr, tap, 1, TAP_NUM_DEVS, GFP_ATOMIC); if (retval >= 0) { tap->minor = retval; } else if (retval == -ENOSPC) { netdev_err(tap->dev, "Too many tap devices\n"); retval = -EINVAL; } spin_unlock(&tap_major->minor_lock); unlock: rcu_read_unlock(); return retval < 0 ? 
retval : 0; } EXPORT_SYMBOL_GPL(tap_get_minor); void tap_free_minor(dev_t major, struct tap_dev *tap) { struct major_info *tap_major; rcu_read_lock(); tap_major = tap_get_major(MAJOR(major)); if (!tap_major) { goto unlock; } spin_lock(&tap_major->minor_lock); if (tap->minor) { idr_remove(&tap_major->minor_idr, tap->minor); tap->minor = 0; } spin_unlock(&tap_major->minor_lock); unlock: rcu_read_unlock(); } EXPORT_SYMBOL_GPL(tap_free_minor); static struct tap_dev *dev_get_by_tap_file(int major, int minor) { struct net_device *dev = NULL; struct tap_dev *tap; struct major_info *tap_major; rcu_read_lock(); tap_major = tap_get_major(major); if (!tap_major) { tap = NULL; goto unlock; } spin_lock(&tap_major->minor_lock); tap = idr_find(&tap_major->minor_idr, minor); if (tap) { dev = tap->dev; dev_hold(dev); } spin_unlock(&tap_major->minor_lock); unlock: rcu_read_unlock(); return tap; } static void tap_sock_write_space(struct sock *sk) { wait_queue_head_t *wqueue; if (!sock_writeable(sk) || !test_and_clear_bit(SOCKWQ_ASYNC_NOSPACE, &sk->sk_socket->flags)) return; wqueue = sk_sleep(sk); if (wqueue && waitqueue_active(wqueue)) wake_up_interruptible_poll(wqueue, EPOLLOUT | EPOLLWRNORM | EPOLLWRBAND); } static void tap_sock_destruct(struct sock *sk) { struct tap_queue *q = container_of(sk, struct tap_queue, sk); ptr_ring_cleanup(&q->ring, __skb_array_destroy_skb); } static int tap_open(struct inode *inode, struct file *file) { struct net *net = current->nsproxy->net_ns; struct tap_dev *tap; struct tap_queue *q; int err = -ENODEV; rtnl_lock(); tap = dev_get_by_tap_file(imajor(inode), iminor(inode)); if (!tap) goto err; err = -ENOMEM; q = (struct tap_queue *)sk_alloc(net, AF_UNSPEC, GFP_KERNEL, &tap_proto, 0); if (!q) goto err; if (ptr_ring_init(&q->ring, tap->dev->tx_queue_len, GFP_KERNEL)) { sk_free(&q->sk); goto err; } init_waitqueue_head(&q->sock.wq.wait); q->sock.type = SOCK_RAW; q->sock.state = SS_CONNECTED; q->sock.file = file; q->sock.ops = &tap_socket_ops; sock_init_data_uid(&q->sock, &q->sk, current_fsuid()); q->sk.sk_write_space = tap_sock_write_space; q->sk.sk_destruct = tap_sock_destruct; q->flags = IFF_VNET_HDR | IFF_NO_PI | IFF_TAP; q->vnet_hdr_sz = sizeof(struct virtio_net_hdr); /* * so far only KVM virtio_net uses tap, enable zero copy between * guest kernel and host kernel when lower device supports zerocopy * * The macvlan supports zerocopy iff the lower device supports zero * copy so we don't have to look at the lower device directly. 
*/ if ((tap->dev->features & NETIF_F_HIGHDMA) && (tap->dev->features & NETIF_F_SG)) sock_set_flag(&q->sk, SOCK_ZEROCOPY); err = tap_set_queue(tap, file, q); if (err) { /* tap_sock_destruct() will take care of freeing ptr_ring */ goto err_put; } /* tap groks IOCB_NOWAIT just fine, mark it as such */ file->f_mode |= FMODE_NOWAIT; dev_put(tap->dev); rtnl_unlock(); return err; err_put: sock_put(&q->sk); err: if (tap) dev_put(tap->dev); rtnl_unlock(); return err; } static int tap_release(struct inode *inode, struct file *file) { struct tap_queue *q = file->private_data; tap_put_queue(q); return 0; } static __poll_t tap_poll(struct file *file, poll_table *wait) { struct tap_queue *q = file->private_data; __poll_t mask = EPOLLERR; if (!q) goto out; mask = 0; poll_wait(file, &q->sock.wq.wait, wait); if (!ptr_ring_empty(&q->ring)) mask |= EPOLLIN | EPOLLRDNORM; if (sock_writeable(&q->sk) || (!test_and_set_bit(SOCKWQ_ASYNC_NOSPACE, &q->sock.flags) && sock_writeable(&q->sk))) mask |= EPOLLOUT | EPOLLWRNORM; out: return mask; } static inline struct sk_buff *tap_alloc_skb(struct sock *sk, size_t prepad, size_t len, size_t linear, int noblock, int *err) { struct sk_buff *skb; /* Under a page? Don't bother with paged skb. */ if (prepad + len < PAGE_SIZE || !linear) linear = len; if (len - linear > MAX_SKB_FRAGS * (PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER)) linear = len - MAX_SKB_FRAGS * (PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER); skb = sock_alloc_send_pskb(sk, prepad + linear, len - linear, noblock, err, PAGE_ALLOC_COSTLY_ORDER); if (!skb) return NULL; skb_reserve(skb, prepad); skb_put(skb, linear); skb->data_len = len - linear; skb->len += len - linear; return skb; } /* Neighbour code has some assumptions on HH_DATA_MOD alignment */ #define TAP_RESERVE HH_DATA_OFF(ETH_HLEN) /* Get packet from user space buffer */ static ssize_t tap_get_user(struct tap_queue *q, void *msg_control, struct iov_iter *from, int noblock) { int good_linear = SKB_MAX_HEAD(TAP_RESERVE); struct sk_buff *skb; struct tap_dev *tap; unsigned long total_len = iov_iter_count(from); unsigned long len = total_len; int err; struct virtio_net_hdr vnet_hdr = { 0 }; int vnet_hdr_len = 0; int copylen = 0; int depth; bool zerocopy = false; size_t linear; enum skb_drop_reason drop_reason; if (q->flags & IFF_VNET_HDR) { vnet_hdr_len = READ_ONCE(q->vnet_hdr_sz); err = -EINVAL; if (len < vnet_hdr_len) goto err; len -= vnet_hdr_len; err = -EFAULT; if (!copy_from_iter_full(&vnet_hdr, sizeof(vnet_hdr), from)) goto err; iov_iter_advance(from, vnet_hdr_len - sizeof(vnet_hdr)); if ((vnet_hdr.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) && tap16_to_cpu(q, vnet_hdr.csum_start) + tap16_to_cpu(q, vnet_hdr.csum_offset) + 2 > tap16_to_cpu(q, vnet_hdr.hdr_len)) vnet_hdr.hdr_len = cpu_to_tap16(q, tap16_to_cpu(q, vnet_hdr.csum_start) + tap16_to_cpu(q, vnet_hdr.csum_offset) + 2); err = -EINVAL; if (tap16_to_cpu(q, vnet_hdr.hdr_len) > len) goto err; } err = -EINVAL; if (unlikely(len < ETH_HLEN)) goto err; if (msg_control && sock_flag(&q->sk, SOCK_ZEROCOPY)) { struct iov_iter i; copylen = vnet_hdr.hdr_len ? 
tap16_to_cpu(q, vnet_hdr.hdr_len) : GOODCOPY_LEN; if (copylen > good_linear) copylen = good_linear; else if (copylen < ETH_HLEN) copylen = ETH_HLEN; linear = copylen; i = *from; iov_iter_advance(&i, copylen); if (iov_iter_npages(&i, INT_MAX) <= MAX_SKB_FRAGS) zerocopy = true; } if (!zerocopy) { copylen = len; linear = tap16_to_cpu(q, vnet_hdr.hdr_len); if (linear > good_linear) linear = good_linear; else if (linear < ETH_HLEN) linear = ETH_HLEN; } skb = tap_alloc_skb(&q->sk, TAP_RESERVE, copylen, linear, noblock, &err); if (!skb) goto err; if (zerocopy) err = zerocopy_sg_from_iter(skb, from); else err = skb_copy_datagram_from_iter(skb, 0, from, len); if (err) { drop_reason = SKB_DROP_REASON_SKB_UCOPY_FAULT; goto err_kfree; } skb_set_network_header(skb, ETH_HLEN); skb_reset_mac_header(skb); skb->protocol = eth_hdr(skb)->h_proto; rcu_read_lock(); tap = rcu_dereference(q->tap); if (!tap) { kfree_skb(skb); rcu_read_unlock(); return total_len; } skb->dev = tap->dev; if (vnet_hdr_len) { err = virtio_net_hdr_to_skb(skb, &vnet_hdr, tap_is_little_endian(q)); if (err) { rcu_read_unlock(); drop_reason = SKB_DROP_REASON_DEV_HDR; goto err_kfree; } } skb_probe_transport_header(skb); /* Move network header to the right position for VLAN tagged packets */ if (eth_type_vlan(skb->protocol) && vlan_get_protocol_and_depth(skb, skb->protocol, &depth) != 0) skb_set_network_header(skb, depth); /* copy skb_ubuf_info for callback when skb has no error */ if (zerocopy) { skb_zcopy_init(skb, msg_control); } else if (msg_control) { struct ubuf_info *uarg = msg_control; uarg->callback(NULL, uarg, false); } dev_queue_xmit(skb); rcu_read_unlock(); return total_len; err_kfree: kfree_skb_reason(skb, drop_reason); err: rcu_read_lock(); tap = rcu_dereference(q->tap); if (tap && tap->count_tx_dropped) tap->count_tx_dropped(tap); rcu_read_unlock(); return err; } static ssize_t tap_write_iter(struct kiocb *iocb, struct iov_iter *from) { struct file *file = iocb->ki_filp; struct tap_queue *q = file->private_data; int noblock = 0; if ((file->f_flags & O_NONBLOCK) || (iocb->ki_flags & IOCB_NOWAIT)) noblock = 1; return tap_get_user(q, NULL, from, noblock); } /* Put packet to the user space buffer */ static ssize_t tap_put_user(struct tap_queue *q, const struct sk_buff *skb, struct iov_iter *iter) { int ret; int vnet_hdr_len = 0; int vlan_offset = 0; int total; if (q->flags & IFF_VNET_HDR) { int vlan_hlen = skb_vlan_tag_present(skb) ? VLAN_HLEN : 0; struct virtio_net_hdr vnet_hdr; vnet_hdr_len = READ_ONCE(q->vnet_hdr_sz); if (iov_iter_count(iter) < vnet_hdr_len) return -EINVAL; if (virtio_net_hdr_from_skb(skb, &vnet_hdr, tap_is_little_endian(q), true, vlan_hlen)) BUG(); if (copy_to_iter(&vnet_hdr, sizeof(vnet_hdr), iter) != sizeof(vnet_hdr)) return -EFAULT; iov_iter_advance(iter, vnet_hdr_len - sizeof(vnet_hdr)); } total = vnet_hdr_len; total += skb->len; if (skb_vlan_tag_present(skb)) { struct { __be16 h_vlan_proto; __be16 h_vlan_TCI; } veth; veth.h_vlan_proto = skb->vlan_proto; veth.h_vlan_TCI = htons(skb_vlan_tag_get(skb)); vlan_offset = offsetof(struct vlan_ethhdr, h_vlan_proto); total += VLAN_HLEN; ret = skb_copy_datagram_iter(skb, 0, iter, vlan_offset); if (ret || !iov_iter_count(iter)) goto done; ret = copy_to_iter(&veth, sizeof(veth), iter); if (ret != sizeof(veth) || !iov_iter_count(iter)) goto done; } ret = skb_copy_datagram_iter(skb, vlan_offset, iter, skb->len - vlan_offset); done: return ret ? 
ret : total; } static ssize_t tap_do_read(struct tap_queue *q, struct iov_iter *to, int noblock, struct sk_buff *skb) { DEFINE_WAIT(wait); ssize_t ret = 0; if (!iov_iter_count(to)) { kfree_skb(skb); return 0; } if (skb) goto put; while (1) { if (!noblock) prepare_to_wait(sk_sleep(&q->sk), &wait, TASK_INTERRUPTIBLE); /* Read frames from the queue */ skb = ptr_ring_consume(&q->ring); if (skb) break; if (noblock) { ret = -EAGAIN; break; } if (signal_pending(current)) { ret = -ERESTARTSYS; break; } /* Nothing to read, let's sleep */ schedule(); } if (!noblock) finish_wait(sk_sleep(&q->sk), &wait); put: if (skb) { ret = tap_put_user(q, skb, to); if (unlikely(ret < 0)) kfree_skb(skb); else consume_skb(skb); } return ret; } static ssize_t tap_read_iter(struct kiocb *iocb, struct iov_iter *to) { struct file *file = iocb->ki_filp; struct tap_queue *q = file->private_data; ssize_t len = iov_iter_count(to), ret; int noblock = 0; if ((file->f_flags & O_NONBLOCK) || (iocb->ki_flags & IOCB_NOWAIT)) noblock = 1; ret = tap_do_read(q, to, noblock, NULL); ret = min_t(ssize_t, ret, len); if (ret > 0) iocb->ki_pos = ret; return ret; } static struct tap_dev *tap_get_tap_dev(struct tap_queue *q) { struct tap_dev *tap; ASSERT_RTNL(); tap = rtnl_dereference(q->tap); if (tap) dev_hold(tap->dev); return tap; } static void tap_put_tap_dev(struct tap_dev *tap) { dev_put(tap->dev); } static int tap_ioctl_set_queue(struct file *file, unsigned int flags) { struct tap_queue *q = file->private_data; struct tap_dev *tap; int ret; tap = tap_get_tap_dev(q); if (!tap) return -EINVAL; if (flags & IFF_ATTACH_QUEUE) ret = tap_enable_queue(tap, file, q); else if (flags & IFF_DETACH_QUEUE) ret = tap_disable_queue(q); else ret = -EINVAL; tap_put_tap_dev(tap); return ret; } static int set_offload(struct tap_queue *q, unsigned long arg) { struct tap_dev *tap; netdev_features_t features; netdev_features_t feature_mask = 0; tap = rtnl_dereference(q->tap); if (!tap) return -ENOLINK; features = tap->dev->features; if (arg & TUN_F_CSUM) { feature_mask = NETIF_F_HW_CSUM; if (arg & (TUN_F_TSO4 | TUN_F_TSO6)) { if (arg & TUN_F_TSO_ECN) feature_mask |= NETIF_F_TSO_ECN; if (arg & TUN_F_TSO4) feature_mask |= NETIF_F_TSO; if (arg & TUN_F_TSO6) feature_mask |= NETIF_F_TSO6; } /* TODO: for now USO4 and USO6 should work simultaneously */ if ((arg & (TUN_F_USO4 | TUN_F_USO6)) == (TUN_F_USO4 | TUN_F_USO6)) features |= NETIF_F_GSO_UDP_L4; } /* tun/tap driver inverts the usage for TSO offloads, where * setting the TSO bit means that the userspace wants to * accept TSO frames and turning it off means that user space * does not support TSO. * For tap, we have to invert it to mean the same thing. * When user space turns off TSO, we turn off GSO/LRO so that * user-space will not receive TSO frames. */ if (feature_mask & (NETIF_F_TSO | NETIF_F_TSO6) || (feature_mask & (TUN_F_USO4 | TUN_F_USO6)) == (TUN_F_USO4 | TUN_F_USO6)) features |= RX_OFFLOADS; else features &= ~RX_OFFLOADS; /* tap_features are the same as features on tun/tap and * reflect user expectations. 
*/ tap->tap_features = feature_mask; if (tap->update_features) tap->update_features(tap, features); return 0; } /* * provide compatibility with generic tun/tap interface */ static long tap_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { struct tap_queue *q = file->private_data; struct tap_dev *tap; void __user *argp = (void __user *)arg; struct ifreq __user *ifr = argp; unsigned int __user *up = argp; unsigned short u; int __user *sp = argp; struct sockaddr sa; int s; int ret; switch (cmd) { case TUNSETIFF: /* ignore the name, just look at flags */ if (get_user(u, &ifr->ifr_flags)) return -EFAULT; ret = 0; if ((u & ~TAP_IFFEATURES) != (IFF_NO_PI | IFF_TAP)) ret = -EINVAL; else q->flags = (q->flags & ~TAP_IFFEATURES) | u; return ret; case TUNGETIFF: rtnl_lock(); tap = tap_get_tap_dev(q); if (!tap) { rtnl_unlock(); return -ENOLINK; } ret = 0; u = q->flags; if (copy_to_user(&ifr->ifr_name, tap->dev->name, IFNAMSIZ) || put_user(u, &ifr->ifr_flags)) ret = -EFAULT; tap_put_tap_dev(tap); rtnl_unlock(); return ret; case TUNSETQUEUE: if (get_user(u, &ifr->ifr_flags)) return -EFAULT; rtnl_lock(); ret = tap_ioctl_set_queue(file, u); rtnl_unlock(); return ret; case TUNGETFEATURES: if (put_user(IFF_TAP | IFF_NO_PI | TAP_IFFEATURES, up)) return -EFAULT; return 0; case TUNSETSNDBUF: if (get_user(s, sp)) return -EFAULT; if (s <= 0) return -EINVAL; q->sk.sk_sndbuf = s; return 0; case TUNGETVNETHDRSZ: s = q->vnet_hdr_sz; if (put_user(s, sp)) return -EFAULT; return 0; case TUNSETVNETHDRSZ: if (get_user(s, sp)) return -EFAULT; if (s < (int)sizeof(struct virtio_net_hdr)) return -EINVAL; q->vnet_hdr_sz = s; return 0; case TUNGETVNETLE: s = !!(q->flags & TAP_VNET_LE); if (put_user(s, sp)) return -EFAULT; return 0; case TUNSETVNETLE: if (get_user(s, sp)) return -EFAULT; if (s) q->flags |= TAP_VNET_LE; else q->flags &= ~TAP_VNET_LE; return 0; case TUNGETVNETBE: return tap_get_vnet_be(q, sp); case TUNSETVNETBE: return tap_set_vnet_be(q, sp); case TUNSETOFFLOAD: /* let the user check for future flags */ if (arg & ~(TUN_F_CSUM | TUN_F_TSO4 | TUN_F_TSO6 | TUN_F_TSO_ECN | TUN_F_UFO | TUN_F_USO4 | TUN_F_USO6)) return -EINVAL; rtnl_lock(); ret = set_offload(q, arg); rtnl_unlock(); return ret; case SIOCGIFHWADDR: rtnl_lock(); tap = tap_get_tap_dev(q); if (!tap) { rtnl_unlock(); return -ENOLINK; } ret = 0; dev_get_mac_address(&sa, dev_net(tap->dev), tap->dev->name); if (copy_to_user(&ifr->ifr_name, tap->dev->name, IFNAMSIZ) || copy_to_user(&ifr->ifr_hwaddr, &sa, sizeof(sa))) ret = -EFAULT; tap_put_tap_dev(tap); rtnl_unlock(); return ret; case SIOCSIFHWADDR: if (copy_from_user(&sa, &ifr->ifr_hwaddr, sizeof(sa))) return -EFAULT; rtnl_lock(); tap = tap_get_tap_dev(q); if (!tap) { rtnl_unlock(); return -ENOLINK; } ret = dev_set_mac_address_user(tap->dev, &sa, NULL); tap_put_tap_dev(tap); rtnl_unlock(); return ret; default: return -EINVAL; } } static const struct file_operations tap_fops = { .owner = THIS_MODULE, .open = tap_open, .release = tap_release, .read_iter = tap_read_iter, .write_iter = tap_write_iter, .poll = tap_poll, .llseek = no_llseek, .unlocked_ioctl = tap_ioctl, .compat_ioctl = compat_ptr_ioctl, }; static int tap_get_user_xdp(struct tap_queue *q, struct xdp_buff *xdp) { struct tun_xdp_hdr *hdr = xdp->data_hard_start; struct virtio_net_hdr *gso = &hdr->gso; int buflen = hdr->buflen; int vnet_hdr_len = 0; struct tap_dev *tap; struct sk_buff *skb; int err, depth; if (q->flags & IFF_VNET_HDR) vnet_hdr_len = READ_ONCE(q->vnet_hdr_sz); skb = build_skb(xdp->data_hard_start, buflen); if (!skb) { err = 
-ENOMEM; goto err; } skb_reserve(skb, xdp->data - xdp->data_hard_start); skb_put(skb, xdp->data_end - xdp->data); skb_set_network_header(skb, ETH_HLEN); skb_reset_mac_header(skb); skb->protocol = eth_hdr(skb)->h_proto; if (vnet_hdr_len) { err = virtio_net_hdr_to_skb(skb, gso, tap_is_little_endian(q)); if (err) goto err_kfree; } /* Move network header to the right position for VLAN tagged packets */ if (eth_type_vlan(skb->protocol) && vlan_get_protocol_and_depth(skb, skb->protocol, &depth) != 0) skb_set_network_header(skb, depth); rcu_read_lock(); tap = rcu_dereference(q->tap); if (tap) { skb->dev = tap->dev; skb_probe_transport_header(skb); dev_queue_xmit(skb); } else { kfree_skb(skb); } rcu_read_unlock(); return 0; err_kfree: kfree_skb(skb); err: rcu_read_lock(); tap = rcu_dereference(q->tap); if (tap && tap->count_tx_dropped) tap->count_tx_dropped(tap); rcu_read_unlock(); return err; } static int tap_sendmsg(struct socket *sock, struct msghdr *m, size_t total_len) { struct tap_queue *q = container_of(sock, struct tap_queue, sock); struct tun_msg_ctl *ctl = m->msg_control; struct xdp_buff *xdp; int i; if (m->msg_controllen == sizeof(struct tun_msg_ctl) && ctl && ctl->type == TUN_MSG_PTR) { for (i = 0; i < ctl->num; i++) { xdp = &((struct xdp_buff *)ctl->ptr)[i]; tap_get_user_xdp(q, xdp); } return 0; } return tap_get_user(q, ctl ? ctl->ptr : NULL, &m->msg_iter, m->msg_flags & MSG_DONTWAIT); } static int tap_recvmsg(struct socket *sock, struct msghdr *m, size_t total_len, int flags) { struct tap_queue *q = container_of(sock, struct tap_queue, sock); struct sk_buff *skb = m->msg_control; int ret; if (flags & ~(MSG_DONTWAIT|MSG_TRUNC)) { kfree_skb(skb); return -EINVAL; } ret = tap_do_read(q, &m->msg_iter, flags & MSG_DONTWAIT, skb); if (ret > total_len) { m->msg_flags |= MSG_TRUNC; ret = flags & MSG_TRUNC ? ret : total_len; } return ret; } static int tap_peek_len(struct socket *sock) { struct tap_queue *q = container_of(sock, struct tap_queue, sock); return PTR_RING_PEEK_CALL(&q->ring, __skb_array_len_with_tag); } /* Ops structure to mimic raw sockets with tun */ static const struct proto_ops tap_socket_ops = { .sendmsg = tap_sendmsg, .recvmsg = tap_recvmsg, .peek_len = tap_peek_len, }; /* Get an underlying socket object from tun file. Returns error unless file is * attached to a device. The returned object works like a packet socket, it * can be used for sock_sendmsg/sock_recvmsg. The caller is responsible for * holding a reference to the file for as long as the socket is in use. 
*/ struct socket *tap_get_socket(struct file *file) { struct tap_queue *q; if (file->f_op != &tap_fops) return ERR_PTR(-EINVAL); q = file->private_data; if (!q) return ERR_PTR(-EBADFD); return &q->sock; } EXPORT_SYMBOL_GPL(tap_get_socket); struct ptr_ring *tap_get_ptr_ring(struct file *file) { struct tap_queue *q; if (file->f_op != &tap_fops) return ERR_PTR(-EINVAL); q = file->private_data; if (!q) return ERR_PTR(-EBADFD); return &q->ring; } EXPORT_SYMBOL_GPL(tap_get_ptr_ring); int tap_queue_resize(struct tap_dev *tap) { struct net_device *dev = tap->dev; struct tap_queue *q; struct ptr_ring **rings; int n = tap->numqueues; int ret, i = 0; rings = kmalloc_array(n, sizeof(*rings), GFP_KERNEL); if (!rings) return -ENOMEM; list_for_each_entry(q, &tap->queue_list, next) rings[i++] = &q->ring; ret = ptr_ring_resize_multiple(rings, n, dev->tx_queue_len, GFP_KERNEL, __skb_array_destroy_skb); kfree(rings); return ret; } EXPORT_SYMBOL_GPL(tap_queue_resize); static int tap_list_add(dev_t major, const char *device_name) { struct major_info *tap_major; tap_major = kzalloc(sizeof(*tap_major), GFP_ATOMIC); if (!tap_major) return -ENOMEM; tap_major->major = MAJOR(major); idr_init(&tap_major->minor_idr); spin_lock_init(&tap_major->minor_lock); tap_major->device_name = device_name; list_add_tail_rcu(&tap_major->next, &major_list); return 0; } int tap_create_cdev(struct cdev *tap_cdev, dev_t *tap_major, const char *device_name, struct module *module) { int err; err = alloc_chrdev_region(tap_major, 0, TAP_NUM_DEVS, device_name); if (err) goto out1; cdev_init(tap_cdev, &tap_fops); tap_cdev->owner = module; err = cdev_add(tap_cdev, *tap_major, TAP_NUM_DEVS); if (err) goto out2; err = tap_list_add(*tap_major, device_name); if (err) goto out3; return 0; out3: cdev_del(tap_cdev); out2: unregister_chrdev_region(*tap_major, TAP_NUM_DEVS); out1: return err; } EXPORT_SYMBOL_GPL(tap_create_cdev); void tap_destroy_cdev(dev_t major, struct cdev *tap_cdev) { struct major_info *tap_major, *tmp; cdev_del(tap_cdev); unregister_chrdev_region(major, TAP_NUM_DEVS); list_for_each_entry_safe(tap_major, tmp, &major_list, next) { if (tap_major->major == MAJOR(major)) { idr_destroy(&tap_major->minor_idr); list_del_rcu(&tap_major->next); kfree_rcu(tap_major, rcu); } } } EXPORT_SYMBOL_GPL(tap_destroy_cdev); MODULE_AUTHOR("Arnd Bergmann <[email protected]>"); MODULE_AUTHOR("Sainath Grandhi <[email protected]>"); MODULE_LICENSE("GPL");
linux-master
drivers/net/tap.c
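Editorial note on the character-device side: tap.c above backs each macvtap interface with a char device node; tap_open() attaches a queue, tap_ioctl() answers the tun/tap ioctls, and tap_read_iter()/tap_put_user() hand frames to userspace, prefixed with a virtio_net_hdr while IFF_VNET_HDR is set (the default for a new queue). The sketch below is a hedged usage illustration only: it assumes a macvtap device already exists, that /dev/tap7 is its node (the real minor comes from tap_get_minor() and the ifindex-based name from macvtap_device_event()), and that the caller has permission to open it.

#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/if.h>
#include <linux/if_tun.h>

int main(void)
{
	const char *path = "/dev/tap7";	/* hypothetical node for one macvtap */
	struct ifreq ifr = { 0 };
	char buf[2048];
	ssize_t n;
	int fd;

	fd = open(path, O_RDWR);
	if (fd < 0) {
		perror("open");
		return 1;
	}

	/* tap_ioctl(TUNGETIFF) reports the attached interface name and the
	 * queue flags (IFF_VNET_HDR | IFF_NO_PI | IFF_TAP by default). */
	if (ioctl(fd, TUNGETIFF, &ifr) < 0) {
		perror("TUNGETIFF");
		close(fd);
		return 1;
	}
	printf("attached to %s, flags 0x%x\n",
	       ifr.ifr_name, (unsigned short)ifr.ifr_flags);

	/* Blocks until a frame arrives; the payload starts after the
	 * vnet header area whose size TUNGETVNETHDRSZ would report. */
	n = read(fd, buf, sizeof(buf));
	if (n > 0)
		printf("read %zd bytes\n", n);

	close(fd);
	return 0;
}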
// SPDX-License-Identifier: GPL-2.0-or-later /* * Copyright (c) 2007 Patrick McHardy <[email protected]> * * The code this is based on carried the following copyright notice: * --- * (C) Copyright 2001-2006 * Alex Zeffertt, Cambridge Broadband Ltd, [email protected] * Re-worked by Ben Greear <[email protected]> * --- */ #include <linux/kernel.h> #include <linux/types.h> #include <linux/module.h> #include <linux/init.h> #include <linux/errno.h> #include <linux/slab.h> #include <linux/string.h> #include <linux/rculist.h> #include <linux/notifier.h> #include <linux/netdevice.h> #include <linux/etherdevice.h> #include <linux/net_tstamp.h> #include <linux/ethtool.h> #include <linux/if_arp.h> #include <linux/if_vlan.h> #include <linux/if_link.h> #include <linux/if_macvlan.h> #include <linux/hash.h> #include <linux/workqueue.h> #include <net/rtnetlink.h> #include <net/xfrm.h> #include <linux/netpoll.h> #include <linux/phy.h> #define MACVLAN_HASH_BITS 8 #define MACVLAN_HASH_SIZE (1<<MACVLAN_HASH_BITS) #define MACVLAN_DEFAULT_BC_QUEUE_LEN 1000 #define MACVLAN_F_PASSTHRU 1 #define MACVLAN_F_ADDRCHANGE 2 struct macvlan_port { struct net_device *dev; struct hlist_head vlan_hash[MACVLAN_HASH_SIZE]; struct list_head vlans; struct sk_buff_head bc_queue; struct work_struct bc_work; u32 bc_queue_len_used; int bc_cutoff; u32 flags; int count; struct hlist_head vlan_source_hash[MACVLAN_HASH_SIZE]; DECLARE_BITMAP(bc_filter, MACVLAN_MC_FILTER_SZ); DECLARE_BITMAP(mc_filter, MACVLAN_MC_FILTER_SZ); unsigned char perm_addr[ETH_ALEN]; }; struct macvlan_source_entry { struct hlist_node hlist; struct macvlan_dev *vlan; unsigned char addr[6+2] __aligned(sizeof(u16)); struct rcu_head rcu; }; struct macvlan_skb_cb { const struct macvlan_dev *src; }; #define MACVLAN_SKB_CB(__skb) ((struct macvlan_skb_cb *)&((__skb)->cb[0])) static void macvlan_port_destroy(struct net_device *dev); static void update_port_bc_queue_len(struct macvlan_port *port); static inline bool macvlan_passthru(const struct macvlan_port *port) { return port->flags & MACVLAN_F_PASSTHRU; } static inline void macvlan_set_passthru(struct macvlan_port *port) { port->flags |= MACVLAN_F_PASSTHRU; } static inline bool macvlan_addr_change(const struct macvlan_port *port) { return port->flags & MACVLAN_F_ADDRCHANGE; } static inline void macvlan_set_addr_change(struct macvlan_port *port) { port->flags |= MACVLAN_F_ADDRCHANGE; } static inline void macvlan_clear_addr_change(struct macvlan_port *port) { port->flags &= ~MACVLAN_F_ADDRCHANGE; } /* Hash Ethernet address */ static u32 macvlan_eth_hash(const unsigned char *addr) { u64 value = get_unaligned((u64 *)addr); /* only want 6 bytes */ #ifdef __BIG_ENDIAN value >>= 16; #else value <<= 16; #endif return hash_64(value, MACVLAN_HASH_BITS); } static struct macvlan_port *macvlan_port_get_rcu(const struct net_device *dev) { return rcu_dereference(dev->rx_handler_data); } static struct macvlan_port *macvlan_port_get_rtnl(const struct net_device *dev) { return rtnl_dereference(dev->rx_handler_data); } static struct macvlan_dev *macvlan_hash_lookup(const struct macvlan_port *port, const unsigned char *addr) { struct macvlan_dev *vlan; u32 idx = macvlan_eth_hash(addr); hlist_for_each_entry_rcu(vlan, &port->vlan_hash[idx], hlist, lockdep_rtnl_is_held()) { if (ether_addr_equal_64bits(vlan->dev->dev_addr, addr)) return vlan; } return NULL; } static struct macvlan_source_entry *macvlan_hash_lookup_source( const struct macvlan_dev *vlan, const unsigned char *addr) { struct macvlan_source_entry *entry; u32 idx = 
macvlan_eth_hash(addr); struct hlist_head *h = &vlan->port->vlan_source_hash[idx]; hlist_for_each_entry_rcu(entry, h, hlist, lockdep_rtnl_is_held()) { if (ether_addr_equal_64bits(entry->addr, addr) && entry->vlan == vlan) return entry; } return NULL; } static int macvlan_hash_add_source(struct macvlan_dev *vlan, const unsigned char *addr) { struct macvlan_port *port = vlan->port; struct macvlan_source_entry *entry; struct hlist_head *h; entry = macvlan_hash_lookup_source(vlan, addr); if (entry) return 0; entry = kmalloc(sizeof(*entry), GFP_KERNEL); if (!entry) return -ENOMEM; ether_addr_copy(entry->addr, addr); entry->vlan = vlan; h = &port->vlan_source_hash[macvlan_eth_hash(addr)]; hlist_add_head_rcu(&entry->hlist, h); vlan->macaddr_count++; return 0; } static void macvlan_hash_add(struct macvlan_dev *vlan) { struct macvlan_port *port = vlan->port; const unsigned char *addr = vlan->dev->dev_addr; u32 idx = macvlan_eth_hash(addr); hlist_add_head_rcu(&vlan->hlist, &port->vlan_hash[idx]); } static void macvlan_hash_del_source(struct macvlan_source_entry *entry) { hlist_del_rcu(&entry->hlist); kfree_rcu(entry, rcu); } static void macvlan_hash_del(struct macvlan_dev *vlan, bool sync) { hlist_del_rcu(&vlan->hlist); if (sync) synchronize_rcu(); } static void macvlan_hash_change_addr(struct macvlan_dev *vlan, const unsigned char *addr) { macvlan_hash_del(vlan, true); /* Now that we are unhashed it is safe to change the device * address without confusing packet delivery. */ eth_hw_addr_set(vlan->dev, addr); macvlan_hash_add(vlan); } static bool macvlan_addr_busy(const struct macvlan_port *port, const unsigned char *addr) { /* Test to see if the specified address is * currently in use by the underlying device or * another macvlan. */ if (!macvlan_passthru(port) && !macvlan_addr_change(port) && ether_addr_equal_64bits(port->dev->dev_addr, addr)) return true; if (macvlan_hash_lookup(port, addr)) return true; return false; } static int macvlan_broadcast_one(struct sk_buff *skb, const struct macvlan_dev *vlan, const struct ethhdr *eth, bool local) { struct net_device *dev = vlan->dev; if (local) return __dev_forward_skb(dev, skb); skb->dev = dev; if (ether_addr_equal_64bits(eth->h_dest, dev->broadcast)) skb->pkt_type = PACKET_BROADCAST; else skb->pkt_type = PACKET_MULTICAST; return 0; } static u32 macvlan_hash_mix(const struct macvlan_dev *vlan) { return (u32)(((unsigned long)vlan) >> L1_CACHE_SHIFT); } static unsigned int mc_hash(const struct macvlan_dev *vlan, const unsigned char *addr) { u32 val = __get_unaligned_cpu32(addr + 2); val ^= macvlan_hash_mix(vlan); return hash_32(val, MACVLAN_MC_FILTER_BITS); } static void macvlan_broadcast(struct sk_buff *skb, const struct macvlan_port *port, struct net_device *src, enum macvlan_mode mode) { const struct ethhdr *eth = eth_hdr(skb); const struct macvlan_dev *vlan; struct sk_buff *nskb; unsigned int i; int err; unsigned int hash; if (skb->protocol == htons(ETH_P_PAUSE)) return; hash_for_each_rcu(port->vlan_hash, i, vlan, hlist) { if (vlan->dev == src || !(vlan->mode & mode)) continue; hash = mc_hash(vlan, eth->h_dest); if (!test_bit(hash, vlan->mc_filter)) continue; err = NET_RX_DROP; nskb = skb_clone(skb, GFP_ATOMIC); if (likely(nskb)) err = macvlan_broadcast_one(nskb, vlan, eth, mode == MACVLAN_MODE_BRIDGE) ?: netif_rx(nskb); macvlan_count_rx(vlan, skb->len + ETH_HLEN, err == NET_RX_SUCCESS, true); } } static void macvlan_multicast_rx(const struct macvlan_port *port, const struct macvlan_dev *src, struct sk_buff *skb) { if (!src) /* frame comes from an 
external address */ macvlan_broadcast(skb, port, NULL, MACVLAN_MODE_PRIVATE | MACVLAN_MODE_VEPA | MACVLAN_MODE_PASSTHRU| MACVLAN_MODE_BRIDGE); else if (src->mode == MACVLAN_MODE_VEPA) /* flood to everyone except source */ macvlan_broadcast(skb, port, src->dev, MACVLAN_MODE_VEPA | MACVLAN_MODE_BRIDGE); else /* * flood only to VEPA ports, bridge ports * already saw the frame on the way out. */ macvlan_broadcast(skb, port, src->dev, MACVLAN_MODE_VEPA); } static void macvlan_process_broadcast(struct work_struct *w) { struct macvlan_port *port = container_of(w, struct macvlan_port, bc_work); struct sk_buff *skb; struct sk_buff_head list; __skb_queue_head_init(&list); spin_lock_bh(&port->bc_queue.lock); skb_queue_splice_tail_init(&port->bc_queue, &list); spin_unlock_bh(&port->bc_queue.lock); while ((skb = __skb_dequeue(&list))) { const struct macvlan_dev *src = MACVLAN_SKB_CB(skb)->src; rcu_read_lock(); macvlan_multicast_rx(port, src, skb); rcu_read_unlock(); if (src) dev_put(src->dev); consume_skb(skb); cond_resched(); } } static void macvlan_broadcast_enqueue(struct macvlan_port *port, const struct macvlan_dev *src, struct sk_buff *skb) { struct sk_buff *nskb; int err = -ENOMEM; nskb = skb_clone(skb, GFP_ATOMIC); if (!nskb) goto err; MACVLAN_SKB_CB(nskb)->src = src; spin_lock(&port->bc_queue.lock); if (skb_queue_len(&port->bc_queue) < port->bc_queue_len_used) { if (src) dev_hold(src->dev); __skb_queue_tail(&port->bc_queue, nskb); err = 0; } spin_unlock(&port->bc_queue.lock); queue_work(system_unbound_wq, &port->bc_work); if (err) goto free_nskb; return; free_nskb: kfree_skb(nskb); err: dev_core_stats_rx_dropped_inc(skb->dev); } static void macvlan_flush_sources(struct macvlan_port *port, struct macvlan_dev *vlan) { struct macvlan_source_entry *entry; struct hlist_node *next; int i; hash_for_each_safe(port->vlan_source_hash, i, next, entry, hlist) if (entry->vlan == vlan) macvlan_hash_del_source(entry); vlan->macaddr_count = 0; } static void macvlan_forward_source_one(struct sk_buff *skb, struct macvlan_dev *vlan) { struct sk_buff *nskb; struct net_device *dev; int len; int ret; dev = vlan->dev; if (unlikely(!(dev->flags & IFF_UP))) return; nskb = skb_clone(skb, GFP_ATOMIC); if (!nskb) return; len = nskb->len + ETH_HLEN; nskb->dev = dev; if (ether_addr_equal_64bits(eth_hdr(skb)->h_dest, dev->dev_addr)) nskb->pkt_type = PACKET_HOST; ret = __netif_rx(nskb); macvlan_count_rx(vlan, len, ret == NET_RX_SUCCESS, false); } static bool macvlan_forward_source(struct sk_buff *skb, struct macvlan_port *port, const unsigned char *addr) { struct macvlan_source_entry *entry; u32 idx = macvlan_eth_hash(addr); struct hlist_head *h = &port->vlan_source_hash[idx]; bool consume = false; hlist_for_each_entry_rcu(entry, h, hlist) { if (ether_addr_equal_64bits(entry->addr, addr)) { if (entry->vlan->flags & MACVLAN_FLAG_NODST) consume = true; macvlan_forward_source_one(skb, entry->vlan); } } return consume; } /* called under rcu_read_lock() from netif_receive_skb */ static rx_handler_result_t macvlan_handle_frame(struct sk_buff **pskb) { struct macvlan_port *port; struct sk_buff *skb = *pskb; const struct ethhdr *eth = eth_hdr(skb); const struct macvlan_dev *vlan; const struct macvlan_dev *src; struct net_device *dev; unsigned int len = 0; int ret; rx_handler_result_t handle_res; /* Packets from dev_loopback_xmit() do not have L2 header, bail out */ if (unlikely(skb->pkt_type == PACKET_LOOPBACK)) return RX_HANDLER_PASS; port = macvlan_port_get_rcu(skb->dev); if (is_multicast_ether_addr(eth->h_dest)) { unsigned int 
hash; skb = ip_check_defrag(dev_net(skb->dev), skb, IP_DEFRAG_MACVLAN); if (!skb) return RX_HANDLER_CONSUMED; *pskb = skb; eth = eth_hdr(skb); if (macvlan_forward_source(skb, port, eth->h_source)) { kfree_skb(skb); return RX_HANDLER_CONSUMED; } src = macvlan_hash_lookup(port, eth->h_source); if (src && src->mode != MACVLAN_MODE_VEPA && src->mode != MACVLAN_MODE_BRIDGE) { /* forward to original port. */ vlan = src; ret = macvlan_broadcast_one(skb, vlan, eth, 0) ?: __netif_rx(skb); handle_res = RX_HANDLER_CONSUMED; goto out; } hash = mc_hash(NULL, eth->h_dest); if (test_bit(hash, port->bc_filter)) macvlan_broadcast_enqueue(port, src, skb); else if (test_bit(hash, port->mc_filter)) macvlan_multicast_rx(port, src, skb); return RX_HANDLER_PASS; } if (macvlan_forward_source(skb, port, eth->h_source)) { kfree_skb(skb); return RX_HANDLER_CONSUMED; } if (macvlan_passthru(port)) vlan = list_first_or_null_rcu(&port->vlans, struct macvlan_dev, list); else vlan = macvlan_hash_lookup(port, eth->h_dest); if (!vlan || vlan->mode == MACVLAN_MODE_SOURCE) return RX_HANDLER_PASS; dev = vlan->dev; if (unlikely(!(dev->flags & IFF_UP))) { kfree_skb(skb); return RX_HANDLER_CONSUMED; } len = skb->len + ETH_HLEN; skb = skb_share_check(skb, GFP_ATOMIC); if (!skb) { ret = NET_RX_DROP; handle_res = RX_HANDLER_CONSUMED; goto out; } *pskb = skb; skb->dev = dev; skb->pkt_type = PACKET_HOST; ret = NET_RX_SUCCESS; handle_res = RX_HANDLER_ANOTHER; out: macvlan_count_rx(vlan, len, ret == NET_RX_SUCCESS, false); return handle_res; } static int macvlan_queue_xmit(struct sk_buff *skb, struct net_device *dev) { const struct macvlan_dev *vlan = netdev_priv(dev); const struct macvlan_port *port = vlan->port; const struct macvlan_dev *dest; if (vlan->mode == MACVLAN_MODE_BRIDGE) { const struct ethhdr *eth = skb_eth_hdr(skb); /* send to other bridge ports directly */ if (is_multicast_ether_addr(eth->h_dest)) { skb_reset_mac_header(skb); macvlan_broadcast(skb, port, dev, MACVLAN_MODE_BRIDGE); goto xmit_world; } dest = macvlan_hash_lookup(port, eth->h_dest); if (dest && dest->mode == MACVLAN_MODE_BRIDGE) { /* send to lowerdev first for its network taps */ dev_forward_skb(vlan->lowerdev, skb); return NET_XMIT_SUCCESS; } } xmit_world: skb->dev = vlan->lowerdev; return dev_queue_xmit_accel(skb, netdev_get_sb_channel(dev) ? dev : NULL); } static inline netdev_tx_t macvlan_netpoll_send_skb(struct macvlan_dev *vlan, struct sk_buff *skb) { #ifdef CONFIG_NET_POLL_CONTROLLER return netpoll_send_skb(vlan->netpoll, skb); #else BUG(); return NETDEV_TX_OK; #endif } static netdev_tx_t macvlan_start_xmit(struct sk_buff *skb, struct net_device *dev) { struct macvlan_dev *vlan = netdev_priv(dev); unsigned int len = skb->len; int ret; if (unlikely(netpoll_tx_running(dev))) return macvlan_netpoll_send_skb(vlan, skb); ret = macvlan_queue_xmit(skb, dev); if (likely(ret == NET_XMIT_SUCCESS || ret == NET_XMIT_CN)) { struct vlan_pcpu_stats *pcpu_stats; pcpu_stats = this_cpu_ptr(vlan->pcpu_stats); u64_stats_update_begin(&pcpu_stats->syncp); u64_stats_inc(&pcpu_stats->tx_packets); u64_stats_add(&pcpu_stats->tx_bytes, len); u64_stats_update_end(&pcpu_stats->syncp); } else { this_cpu_inc(vlan->pcpu_stats->tx_dropped); } return ret; } static int macvlan_hard_header(struct sk_buff *skb, struct net_device *dev, unsigned short type, const void *daddr, const void *saddr, unsigned len) { const struct macvlan_dev *vlan = netdev_priv(dev); struct net_device *lowerdev = vlan->lowerdev; return dev_hard_header(skb, lowerdev, type, daddr, saddr ? 
: dev->dev_addr, len); } static const struct header_ops macvlan_hard_header_ops = { .create = macvlan_hard_header, .parse = eth_header_parse, .cache = eth_header_cache, .cache_update = eth_header_cache_update, }; static int macvlan_open(struct net_device *dev) { struct macvlan_dev *vlan = netdev_priv(dev); struct net_device *lowerdev = vlan->lowerdev; int err; if (macvlan_passthru(vlan->port)) { if (!(vlan->flags & MACVLAN_FLAG_NOPROMISC)) { err = dev_set_promiscuity(lowerdev, 1); if (err < 0) goto out; } goto hash_add; } err = -EADDRINUSE; if (macvlan_addr_busy(vlan->port, dev->dev_addr)) goto out; /* Attempt to populate accel_priv which is used to offload the L2 * forwarding requests for unicast packets. */ if (lowerdev->features & NETIF_F_HW_L2FW_DOFFLOAD) vlan->accel_priv = lowerdev->netdev_ops->ndo_dfwd_add_station(lowerdev, dev); /* If earlier attempt to offload failed, or accel_priv is not * populated we must add the unicast address to the lower device. */ if (IS_ERR_OR_NULL(vlan->accel_priv)) { vlan->accel_priv = NULL; err = dev_uc_add(lowerdev, dev->dev_addr); if (err < 0) goto out; } if (dev->flags & IFF_ALLMULTI) { err = dev_set_allmulti(lowerdev, 1); if (err < 0) goto del_unicast; } if (dev->flags & IFF_PROMISC) { err = dev_set_promiscuity(lowerdev, 1); if (err < 0) goto clear_multi; } hash_add: macvlan_hash_add(vlan); return 0; clear_multi: if (dev->flags & IFF_ALLMULTI) dev_set_allmulti(lowerdev, -1); del_unicast: if (vlan->accel_priv) { lowerdev->netdev_ops->ndo_dfwd_del_station(lowerdev, vlan->accel_priv); vlan->accel_priv = NULL; } else { dev_uc_del(lowerdev, dev->dev_addr); } out: return err; } static int macvlan_stop(struct net_device *dev) { struct macvlan_dev *vlan = netdev_priv(dev); struct net_device *lowerdev = vlan->lowerdev; if (vlan->accel_priv) { lowerdev->netdev_ops->ndo_dfwd_del_station(lowerdev, vlan->accel_priv); vlan->accel_priv = NULL; } dev_uc_unsync(lowerdev, dev); dev_mc_unsync(lowerdev, dev); if (macvlan_passthru(vlan->port)) { if (!(vlan->flags & MACVLAN_FLAG_NOPROMISC)) dev_set_promiscuity(lowerdev, -1); goto hash_del; } if (dev->flags & IFF_ALLMULTI) dev_set_allmulti(lowerdev, -1); if (dev->flags & IFF_PROMISC) dev_set_promiscuity(lowerdev, -1); dev_uc_del(lowerdev, dev->dev_addr); hash_del: macvlan_hash_del(vlan, !dev->dismantle); return 0; } static int macvlan_sync_address(struct net_device *dev, const unsigned char *addr) { struct macvlan_dev *vlan = netdev_priv(dev); struct net_device *lowerdev = vlan->lowerdev; struct macvlan_port *port = vlan->port; int err; if (!(dev->flags & IFF_UP)) { /* Just copy in the new address */ eth_hw_addr_set(dev, addr); } else { /* Rehash and update the device filters */ if (macvlan_addr_busy(vlan->port, addr)) return -EADDRINUSE; if (!macvlan_passthru(port)) { err = dev_uc_add(lowerdev, addr); if (err) return err; dev_uc_del(lowerdev, dev->dev_addr); } macvlan_hash_change_addr(vlan, addr); } if (macvlan_passthru(port) && !macvlan_addr_change(port)) { /* Since addr_change isn't set, we are here due to lower * device change. Save the lower-dev address so we can * restore it later. 
*/ ether_addr_copy(vlan->port->perm_addr, lowerdev->dev_addr); } macvlan_clear_addr_change(port); return 0; } static int macvlan_set_mac_address(struct net_device *dev, void *p) { struct macvlan_dev *vlan = netdev_priv(dev); struct sockaddr *addr = p; if (!is_valid_ether_addr(addr->sa_data)) return -EADDRNOTAVAIL; /* If the addresses are the same, this is a no-op */ if (ether_addr_equal(dev->dev_addr, addr->sa_data)) return 0; if (vlan->mode == MACVLAN_MODE_PASSTHRU) { macvlan_set_addr_change(vlan->port); return dev_set_mac_address(vlan->lowerdev, addr, NULL); } if (macvlan_addr_busy(vlan->port, addr->sa_data)) return -EADDRINUSE; return macvlan_sync_address(dev, addr->sa_data); } static void macvlan_change_rx_flags(struct net_device *dev, int change) { struct macvlan_dev *vlan = netdev_priv(dev); struct net_device *lowerdev = vlan->lowerdev; if (dev->flags & IFF_UP) { if (change & IFF_ALLMULTI) dev_set_allmulti(lowerdev, dev->flags & IFF_ALLMULTI ? 1 : -1); if (change & IFF_PROMISC) dev_set_promiscuity(lowerdev, dev->flags & IFF_PROMISC ? 1 : -1); } } static void macvlan_compute_filter(unsigned long *mc_filter, struct net_device *dev, struct macvlan_dev *vlan, int cutoff) { if (dev->flags & (IFF_PROMISC | IFF_ALLMULTI)) { bitmap_fill(mc_filter, MACVLAN_MC_FILTER_SZ); } else { DECLARE_BITMAP(filter, MACVLAN_MC_FILTER_SZ); struct netdev_hw_addr *ha; bitmap_zero(filter, MACVLAN_MC_FILTER_SZ); netdev_for_each_mc_addr(ha, dev) { if (!vlan && ha->synced <= cutoff) continue; __set_bit(mc_hash(vlan, ha->addr), filter); } __set_bit(mc_hash(vlan, dev->broadcast), filter); bitmap_copy(mc_filter, filter, MACVLAN_MC_FILTER_SZ); } } static void macvlan_recompute_bc_filter(struct macvlan_dev *vlan) { if (vlan->port->bc_cutoff < 0) { bitmap_zero(vlan->port->bc_filter, MACVLAN_MC_FILTER_SZ); return; } macvlan_compute_filter(vlan->port->bc_filter, vlan->lowerdev, NULL, vlan->port->bc_cutoff); } static void macvlan_set_mac_lists(struct net_device *dev) { struct macvlan_dev *vlan = netdev_priv(dev); macvlan_compute_filter(vlan->mc_filter, dev, vlan, 0); dev_uc_sync(vlan->lowerdev, dev); dev_mc_sync(vlan->lowerdev, dev); /* This is slightly inaccurate as we're including the subscription * list of vlan->lowerdev too. * * Bug alert: This only works if everyone has the same broadcast * address as lowerdev. As soon as someone changes theirs this * will break. * * However, this is already broken as when you change your broadcast * address we don't get called. * * The solution is to maintain a list of broadcast addresses like * we do for uc/mc, if you care. 
*/ macvlan_compute_filter(vlan->port->mc_filter, vlan->lowerdev, NULL, 0); macvlan_recompute_bc_filter(vlan); } static void update_port_bc_cutoff(struct macvlan_dev *vlan, int cutoff) { if (vlan->port->bc_cutoff == cutoff) return; vlan->port->bc_cutoff = cutoff; macvlan_recompute_bc_filter(vlan); } static int macvlan_change_mtu(struct net_device *dev, int new_mtu) { struct macvlan_dev *vlan = netdev_priv(dev); if (vlan->lowerdev->mtu < new_mtu) return -EINVAL; dev->mtu = new_mtu; return 0; } static int macvlan_hwtstamp_get(struct net_device *dev, struct kernel_hwtstamp_config *cfg) { struct net_device *real_dev = macvlan_dev_real_dev(dev); return generic_hwtstamp_get_lower(real_dev, cfg); } static int macvlan_hwtstamp_set(struct net_device *dev, struct kernel_hwtstamp_config *cfg, struct netlink_ext_ack *extack) { struct net_device *real_dev = macvlan_dev_real_dev(dev); if (!net_eq(dev_net(dev), &init_net)) return -EOPNOTSUPP; return generic_hwtstamp_set_lower(real_dev, cfg, extack); } /* * macvlan network devices have devices nesting below it and are a special * "super class" of normal network devices; split their locks off into a * separate class since they always nest. */ static struct lock_class_key macvlan_netdev_addr_lock_key; #define ALWAYS_ON_OFFLOADS \ (NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_GSO_SOFTWARE | \ NETIF_F_GSO_ROBUST | NETIF_F_GSO_ENCAP_ALL) #define ALWAYS_ON_FEATURES (ALWAYS_ON_OFFLOADS | NETIF_F_LLTX) #define MACVLAN_FEATURES \ (NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_HIGHDMA | NETIF_F_FRAGLIST | \ NETIF_F_GSO | NETIF_F_TSO | NETIF_F_LRO | \ NETIF_F_TSO_ECN | NETIF_F_TSO6 | NETIF_F_GRO | NETIF_F_RXCSUM | \ NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_VLAN_STAG_FILTER) #define MACVLAN_STATE_MASK \ ((1<<__LINK_STATE_NOCARRIER) | (1<<__LINK_STATE_DORMANT)) static void macvlan_set_lockdep_class(struct net_device *dev) { netdev_lockdep_set_classes(dev); lockdep_set_class(&dev->addr_list_lock, &macvlan_netdev_addr_lock_key); } static int macvlan_init(struct net_device *dev) { struct macvlan_dev *vlan = netdev_priv(dev); struct net_device *lowerdev = vlan->lowerdev; struct macvlan_port *port = vlan->port; dev->state = (dev->state & ~MACVLAN_STATE_MASK) | (lowerdev->state & MACVLAN_STATE_MASK); dev->features = lowerdev->features & MACVLAN_FEATURES; dev->features |= ALWAYS_ON_FEATURES; dev->hw_features |= NETIF_F_LRO; dev->vlan_features = lowerdev->vlan_features & MACVLAN_FEATURES; dev->vlan_features |= ALWAYS_ON_OFFLOADS; dev->hw_enc_features |= dev->features; netif_inherit_tso_max(dev, lowerdev); dev->hard_header_len = lowerdev->hard_header_len; macvlan_set_lockdep_class(dev); vlan->pcpu_stats = netdev_alloc_pcpu_stats(struct vlan_pcpu_stats); if (!vlan->pcpu_stats) return -ENOMEM; port->count += 1; /* Get macvlan's reference to lowerdev */ netdev_hold(lowerdev, &vlan->dev_tracker, GFP_KERNEL); return 0; } static void macvlan_uninit(struct net_device *dev) { struct macvlan_dev *vlan = netdev_priv(dev); struct macvlan_port *port = vlan->port; free_percpu(vlan->pcpu_stats); macvlan_flush_sources(port, vlan); port->count -= 1; if (!port->count) macvlan_port_destroy(port->dev); } static void macvlan_dev_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats) { struct macvlan_dev *vlan = netdev_priv(dev); if (vlan->pcpu_stats) { struct vlan_pcpu_stats *p; u64 rx_packets, rx_bytes, rx_multicast, tx_packets, tx_bytes; u32 rx_errors = 0, tx_dropped = 0; unsigned int start; int i; for_each_possible_cpu(i) { p = per_cpu_ptr(vlan->pcpu_stats, i); do { start = 
u64_stats_fetch_begin(&p->syncp); rx_packets = u64_stats_read(&p->rx_packets); rx_bytes = u64_stats_read(&p->rx_bytes); rx_multicast = u64_stats_read(&p->rx_multicast); tx_packets = u64_stats_read(&p->tx_packets); tx_bytes = u64_stats_read(&p->tx_bytes); } while (u64_stats_fetch_retry(&p->syncp, start)); stats->rx_packets += rx_packets; stats->rx_bytes += rx_bytes; stats->multicast += rx_multicast; stats->tx_packets += tx_packets; stats->tx_bytes += tx_bytes; /* rx_errors & tx_dropped are u32, updated * without syncp protection. */ rx_errors += READ_ONCE(p->rx_errors); tx_dropped += READ_ONCE(p->tx_dropped); } stats->rx_errors = rx_errors; stats->rx_dropped = rx_errors; stats->tx_dropped = tx_dropped; } } static int macvlan_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid) { struct macvlan_dev *vlan = netdev_priv(dev); struct net_device *lowerdev = vlan->lowerdev; return vlan_vid_add(lowerdev, proto, vid); } static int macvlan_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, u16 vid) { struct macvlan_dev *vlan = netdev_priv(dev); struct net_device *lowerdev = vlan->lowerdev; vlan_vid_del(lowerdev, proto, vid); return 0; } static int macvlan_fdb_add(struct ndmsg *ndm, struct nlattr *tb[], struct net_device *dev, const unsigned char *addr, u16 vid, u16 flags, struct netlink_ext_ack *extack) { struct macvlan_dev *vlan = netdev_priv(dev); int err = -EINVAL; /* Support unicast filter only on passthru devices. * Multicast filter should be allowed on all devices. */ if (!macvlan_passthru(vlan->port) && is_unicast_ether_addr(addr)) return -EOPNOTSUPP; if (flags & NLM_F_REPLACE) return -EOPNOTSUPP; if (is_unicast_ether_addr(addr)) err = dev_uc_add_excl(dev, addr); else if (is_multicast_ether_addr(addr)) err = dev_mc_add_excl(dev, addr); return err; } static int macvlan_fdb_del(struct ndmsg *ndm, struct nlattr *tb[], struct net_device *dev, const unsigned char *addr, u16 vid, struct netlink_ext_ack *extack) { struct macvlan_dev *vlan = netdev_priv(dev); int err = -EINVAL; /* Support unicast filter only on passthru devices. * Multicast filter should be allowed on all devices. 
*/ if (!macvlan_passthru(vlan->port) && is_unicast_ether_addr(addr)) return -EOPNOTSUPP; if (is_unicast_ether_addr(addr)) err = dev_uc_del(dev, addr); else if (is_multicast_ether_addr(addr)) err = dev_mc_del(dev, addr); return err; } static void macvlan_ethtool_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *drvinfo) { strscpy(drvinfo->driver, "macvlan", sizeof(drvinfo->driver)); strscpy(drvinfo->version, "0.1", sizeof(drvinfo->version)); } static int macvlan_ethtool_get_link_ksettings(struct net_device *dev, struct ethtool_link_ksettings *cmd) { const struct macvlan_dev *vlan = netdev_priv(dev); return __ethtool_get_link_ksettings(vlan->lowerdev, cmd); } static int macvlan_ethtool_get_ts_info(struct net_device *dev, struct ethtool_ts_info *info) { struct net_device *real_dev = macvlan_dev_real_dev(dev); const struct ethtool_ops *ops = real_dev->ethtool_ops; struct phy_device *phydev = real_dev->phydev; if (phy_has_tsinfo(phydev)) { return phy_ts_info(phydev, info); } else if (ops->get_ts_info) { return ops->get_ts_info(real_dev, info); } else { info->so_timestamping = SOF_TIMESTAMPING_RX_SOFTWARE | SOF_TIMESTAMPING_SOFTWARE; info->phc_index = -1; } return 0; } static netdev_features_t macvlan_fix_features(struct net_device *dev, netdev_features_t features) { struct macvlan_dev *vlan = netdev_priv(dev); netdev_features_t lowerdev_features = vlan->lowerdev->features; netdev_features_t mask; features |= NETIF_F_ALL_FOR_ALL; features &= (vlan->set_features | ~MACVLAN_FEATURES); mask = features; lowerdev_features &= (features | ~NETIF_F_LRO); features = netdev_increment_features(lowerdev_features, features, mask); features |= ALWAYS_ON_FEATURES; features &= (ALWAYS_ON_FEATURES | MACVLAN_FEATURES); return features; } #ifdef CONFIG_NET_POLL_CONTROLLER static void macvlan_dev_poll_controller(struct net_device *dev) { return; } static int macvlan_dev_netpoll_setup(struct net_device *dev, struct netpoll_info *npinfo) { struct macvlan_dev *vlan = netdev_priv(dev); struct net_device *real_dev = vlan->lowerdev; struct netpoll *netpoll; int err; netpoll = kzalloc(sizeof(*netpoll), GFP_KERNEL); err = -ENOMEM; if (!netpoll) goto out; err = __netpoll_setup(netpoll, real_dev); if (err) { kfree(netpoll); goto out; } vlan->netpoll = netpoll; out: return err; } static void macvlan_dev_netpoll_cleanup(struct net_device *dev) { struct macvlan_dev *vlan = netdev_priv(dev); struct netpoll *netpoll = vlan->netpoll; if (!netpoll) return; vlan->netpoll = NULL; __netpoll_free(netpoll); } #endif /* CONFIG_NET_POLL_CONTROLLER */ static int macvlan_dev_get_iflink(const struct net_device *dev) { struct macvlan_dev *vlan = netdev_priv(dev); return vlan->lowerdev->ifindex; } static const struct ethtool_ops macvlan_ethtool_ops = { .get_link = ethtool_op_get_link, .get_link_ksettings = macvlan_ethtool_get_link_ksettings, .get_drvinfo = macvlan_ethtool_get_drvinfo, .get_ts_info = macvlan_ethtool_get_ts_info, }; static const struct net_device_ops macvlan_netdev_ops = { .ndo_init = macvlan_init, .ndo_uninit = macvlan_uninit, .ndo_open = macvlan_open, .ndo_stop = macvlan_stop, .ndo_start_xmit = macvlan_start_xmit, .ndo_change_mtu = macvlan_change_mtu, .ndo_fix_features = macvlan_fix_features, .ndo_change_rx_flags = macvlan_change_rx_flags, .ndo_set_mac_address = macvlan_set_mac_address, .ndo_set_rx_mode = macvlan_set_mac_lists, .ndo_get_stats64 = macvlan_dev_get_stats64, .ndo_validate_addr = eth_validate_addr, .ndo_vlan_rx_add_vid = macvlan_vlan_rx_add_vid, .ndo_vlan_rx_kill_vid = macvlan_vlan_rx_kill_vid, 
.ndo_fdb_add = macvlan_fdb_add, .ndo_fdb_del = macvlan_fdb_del, .ndo_fdb_dump = ndo_dflt_fdb_dump, #ifdef CONFIG_NET_POLL_CONTROLLER .ndo_poll_controller = macvlan_dev_poll_controller, .ndo_netpoll_setup = macvlan_dev_netpoll_setup, .ndo_netpoll_cleanup = macvlan_dev_netpoll_cleanup, #endif .ndo_get_iflink = macvlan_dev_get_iflink, .ndo_features_check = passthru_features_check, .ndo_hwtstamp_get = macvlan_hwtstamp_get, .ndo_hwtstamp_set = macvlan_hwtstamp_set, }; static void macvlan_dev_free(struct net_device *dev) { struct macvlan_dev *vlan = netdev_priv(dev); /* Get rid of the macvlan's reference to lowerdev */ netdev_put(vlan->lowerdev, &vlan->dev_tracker); } void macvlan_common_setup(struct net_device *dev) { ether_setup(dev); /* ether_setup() has set dev->min_mtu to ETH_MIN_MTU. */ dev->max_mtu = ETH_MAX_MTU; dev->priv_flags &= ~IFF_TX_SKB_SHARING; netif_keep_dst(dev); dev->priv_flags |= IFF_UNICAST_FLT | IFF_CHANGE_PROTO_DOWN; dev->netdev_ops = &macvlan_netdev_ops; dev->needs_free_netdev = true; dev->priv_destructor = macvlan_dev_free; dev->header_ops = &macvlan_hard_header_ops; dev->ethtool_ops = &macvlan_ethtool_ops; } EXPORT_SYMBOL_GPL(macvlan_common_setup); static void macvlan_setup(struct net_device *dev) { macvlan_common_setup(dev); dev->priv_flags |= IFF_NO_QUEUE; } static int macvlan_port_create(struct net_device *dev) { struct macvlan_port *port; unsigned int i; int err; if (dev->type != ARPHRD_ETHER || dev->flags & IFF_LOOPBACK) return -EINVAL; if (netdev_is_rx_handler_busy(dev)) return -EBUSY; port = kzalloc(sizeof(*port), GFP_KERNEL); if (port == NULL) return -ENOMEM; port->dev = dev; ether_addr_copy(port->perm_addr, dev->dev_addr); INIT_LIST_HEAD(&port->vlans); for (i = 0; i < MACVLAN_HASH_SIZE; i++) INIT_HLIST_HEAD(&port->vlan_hash[i]); for (i = 0; i < MACVLAN_HASH_SIZE; i++) INIT_HLIST_HEAD(&port->vlan_source_hash[i]); port->bc_queue_len_used = 0; port->bc_cutoff = 1; skb_queue_head_init(&port->bc_queue); INIT_WORK(&port->bc_work, macvlan_process_broadcast); err = netdev_rx_handler_register(dev, macvlan_handle_frame, port); if (err) kfree(port); else dev->priv_flags |= IFF_MACVLAN_PORT; return err; } static void macvlan_port_destroy(struct net_device *dev) { struct macvlan_port *port = macvlan_port_get_rtnl(dev); struct sk_buff *skb; dev->priv_flags &= ~IFF_MACVLAN_PORT; netdev_rx_handler_unregister(dev); /* After this point, no packet can schedule bc_work anymore, * but we need to cancel it and purge left skbs if any. */ cancel_work_sync(&port->bc_work); while ((skb = __skb_dequeue(&port->bc_queue))) { const struct macvlan_dev *src = MACVLAN_SKB_CB(skb)->src; if (src) dev_put(src->dev); kfree_skb(skb); } /* If the lower device address has been changed by passthru * macvlan, put it back. 
*/ if (macvlan_passthru(port) && !ether_addr_equal(port->dev->dev_addr, port->perm_addr)) { struct sockaddr sa; sa.sa_family = port->dev->type; memcpy(&sa.sa_data, port->perm_addr, port->dev->addr_len); dev_set_mac_address(port->dev, &sa, NULL); } kfree(port); } static int macvlan_validate(struct nlattr *tb[], struct nlattr *data[], struct netlink_ext_ack *extack) { struct nlattr *nla, *head; int rem, len; if (tb[IFLA_ADDRESS]) { if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN) return -EINVAL; if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS]))) return -EADDRNOTAVAIL; } if (!data) return 0; if (data[IFLA_MACVLAN_FLAGS] && nla_get_u16(data[IFLA_MACVLAN_FLAGS]) & ~(MACVLAN_FLAG_NOPROMISC | MACVLAN_FLAG_NODST)) return -EINVAL; if (data[IFLA_MACVLAN_MODE]) { switch (nla_get_u32(data[IFLA_MACVLAN_MODE])) { case MACVLAN_MODE_PRIVATE: case MACVLAN_MODE_VEPA: case MACVLAN_MODE_BRIDGE: case MACVLAN_MODE_PASSTHRU: case MACVLAN_MODE_SOURCE: break; default: return -EINVAL; } } if (data[IFLA_MACVLAN_MACADDR_MODE]) { switch (nla_get_u32(data[IFLA_MACVLAN_MACADDR_MODE])) { case MACVLAN_MACADDR_ADD: case MACVLAN_MACADDR_DEL: case MACVLAN_MACADDR_FLUSH: case MACVLAN_MACADDR_SET: break; default: return -EINVAL; } } if (data[IFLA_MACVLAN_MACADDR]) { if (nla_len(data[IFLA_MACVLAN_MACADDR]) != ETH_ALEN) return -EINVAL; if (!is_valid_ether_addr(nla_data(data[IFLA_MACVLAN_MACADDR]))) return -EADDRNOTAVAIL; } if (data[IFLA_MACVLAN_MACADDR_DATA]) { head = nla_data(data[IFLA_MACVLAN_MACADDR_DATA]); len = nla_len(data[IFLA_MACVLAN_MACADDR_DATA]); nla_for_each_attr(nla, head, len, rem) { if (nla_type(nla) != IFLA_MACVLAN_MACADDR || nla_len(nla) != ETH_ALEN) return -EINVAL; if (!is_valid_ether_addr(nla_data(nla))) return -EADDRNOTAVAIL; } } if (data[IFLA_MACVLAN_MACADDR_COUNT]) return -EINVAL; return 0; } /* * reconfigure list of remote source mac address * (only for macvlan devices in source mode) * Note regarding alignment: all netlink data is aligned to 4 Byte, which * suffices for both ether_addr_copy and ether_addr_equal_64bits usage. 
*/ static int macvlan_changelink_sources(struct macvlan_dev *vlan, u32 mode, struct nlattr *data[]) { char *addr = NULL; int ret, rem, len; struct nlattr *nla, *head; struct macvlan_source_entry *entry; if (data[IFLA_MACVLAN_MACADDR]) addr = nla_data(data[IFLA_MACVLAN_MACADDR]); if (mode == MACVLAN_MACADDR_ADD) { if (!addr) return -EINVAL; return macvlan_hash_add_source(vlan, addr); } else if (mode == MACVLAN_MACADDR_DEL) { if (!addr) return -EINVAL; entry = macvlan_hash_lookup_source(vlan, addr); if (entry) { macvlan_hash_del_source(entry); vlan->macaddr_count--; } } else if (mode == MACVLAN_MACADDR_FLUSH) { macvlan_flush_sources(vlan->port, vlan); } else if (mode == MACVLAN_MACADDR_SET) { macvlan_flush_sources(vlan->port, vlan); if (addr) { ret = macvlan_hash_add_source(vlan, addr); if (ret) return ret; } if (!data[IFLA_MACVLAN_MACADDR_DATA]) return 0; head = nla_data(data[IFLA_MACVLAN_MACADDR_DATA]); len = nla_len(data[IFLA_MACVLAN_MACADDR_DATA]); nla_for_each_attr(nla, head, len, rem) { addr = nla_data(nla); ret = macvlan_hash_add_source(vlan, addr); if (ret) return ret; } } else { return -EINVAL; } return 0; } int macvlan_common_newlink(struct net *src_net, struct net_device *dev, struct nlattr *tb[], struct nlattr *data[], struct netlink_ext_ack *extack) { struct macvlan_dev *vlan = netdev_priv(dev); struct macvlan_port *port; struct net_device *lowerdev; int err; int macmode; bool create = false; if (!tb[IFLA_LINK]) return -EINVAL; lowerdev = __dev_get_by_index(src_net, nla_get_u32(tb[IFLA_LINK])); if (lowerdev == NULL) return -ENODEV; /* When creating macvlans or macvtaps on top of other macvlans - use * the real device as the lowerdev. */ if (netif_is_macvlan(lowerdev)) lowerdev = macvlan_dev_real_dev(lowerdev); if (!tb[IFLA_MTU]) dev->mtu = lowerdev->mtu; else if (dev->mtu > lowerdev->mtu) return -EINVAL; /* MTU range: 68 - lowerdev->max_mtu */ dev->min_mtu = ETH_MIN_MTU; dev->max_mtu = lowerdev->max_mtu; if (!tb[IFLA_ADDRESS]) eth_hw_addr_random(dev); if (!netif_is_macvlan_port(lowerdev)) { err = macvlan_port_create(lowerdev); if (err < 0) return err; create = true; } port = macvlan_port_get_rtnl(lowerdev); /* Only 1 macvlan device can be created in passthru mode */ if (macvlan_passthru(port)) { /* The macvlan port must be not created this time, * still goto destroy_macvlan_port for readability. 
*/ err = -EINVAL; goto destroy_macvlan_port; } vlan->lowerdev = lowerdev; vlan->dev = dev; vlan->port = port; vlan->set_features = MACVLAN_FEATURES; vlan->mode = MACVLAN_MODE_VEPA; if (data && data[IFLA_MACVLAN_MODE]) vlan->mode = nla_get_u32(data[IFLA_MACVLAN_MODE]); if (data && data[IFLA_MACVLAN_FLAGS]) vlan->flags = nla_get_u16(data[IFLA_MACVLAN_FLAGS]); if (vlan->mode == MACVLAN_MODE_PASSTHRU) { if (port->count) { err = -EINVAL; goto destroy_macvlan_port; } macvlan_set_passthru(port); eth_hw_addr_inherit(dev, lowerdev); } if (data && data[IFLA_MACVLAN_MACADDR_MODE]) { if (vlan->mode != MACVLAN_MODE_SOURCE) { err = -EINVAL; goto destroy_macvlan_port; } macmode = nla_get_u32(data[IFLA_MACVLAN_MACADDR_MODE]); err = macvlan_changelink_sources(vlan, macmode, data); if (err) goto destroy_macvlan_port; } vlan->bc_queue_len_req = MACVLAN_DEFAULT_BC_QUEUE_LEN; if (data && data[IFLA_MACVLAN_BC_QUEUE_LEN]) vlan->bc_queue_len_req = nla_get_u32(data[IFLA_MACVLAN_BC_QUEUE_LEN]); if (data && data[IFLA_MACVLAN_BC_CUTOFF]) update_port_bc_cutoff( vlan, nla_get_s32(data[IFLA_MACVLAN_BC_CUTOFF])); err = register_netdevice(dev); if (err < 0) goto destroy_macvlan_port; dev->priv_flags |= IFF_MACVLAN; err = netdev_upper_dev_link(lowerdev, dev, extack); if (err) goto unregister_netdev; list_add_tail_rcu(&vlan->list, &port->vlans); update_port_bc_queue_len(vlan->port); netif_stacked_transfer_operstate(lowerdev, dev); linkwatch_fire_event(dev); return 0; unregister_netdev: /* macvlan_uninit would free the macvlan port */ unregister_netdevice(dev); return err; destroy_macvlan_port: /* the macvlan port may be freed by macvlan_uninit when fail to register. * so we destroy the macvlan port only when it's valid. */ if (create && macvlan_port_get_rtnl(lowerdev)) { macvlan_flush_sources(port, vlan); macvlan_port_destroy(port->dev); } return err; } EXPORT_SYMBOL_GPL(macvlan_common_newlink); static int macvlan_newlink(struct net *src_net, struct net_device *dev, struct nlattr *tb[], struct nlattr *data[], struct netlink_ext_ack *extack) { return macvlan_common_newlink(src_net, dev, tb, data, extack); } void macvlan_dellink(struct net_device *dev, struct list_head *head) { struct macvlan_dev *vlan = netdev_priv(dev); if (vlan->mode == MACVLAN_MODE_SOURCE) macvlan_flush_sources(vlan->port, vlan); list_del_rcu(&vlan->list); update_port_bc_queue_len(vlan->port); unregister_netdevice_queue(dev, head); netdev_upper_dev_unlink(vlan->lowerdev, dev); } EXPORT_SYMBOL_GPL(macvlan_dellink); static int macvlan_changelink(struct net_device *dev, struct nlattr *tb[], struct nlattr *data[], struct netlink_ext_ack *extack) { struct macvlan_dev *vlan = netdev_priv(dev); enum macvlan_mode mode; bool set_mode = false; enum macvlan_macaddr_mode macmode; int ret; /* Validate mode, but don't set yet: setting flags may fail. 
*/ if (data && data[IFLA_MACVLAN_MODE]) { set_mode = true; mode = nla_get_u32(data[IFLA_MACVLAN_MODE]); /* Passthrough mode can't be set or cleared dynamically */ if ((mode == MACVLAN_MODE_PASSTHRU) != (vlan->mode == MACVLAN_MODE_PASSTHRU)) return -EINVAL; if (vlan->mode == MACVLAN_MODE_SOURCE && vlan->mode != mode) macvlan_flush_sources(vlan->port, vlan); } if (data && data[IFLA_MACVLAN_FLAGS]) { __u16 flags = nla_get_u16(data[IFLA_MACVLAN_FLAGS]); bool promisc = (flags ^ vlan->flags) & MACVLAN_FLAG_NOPROMISC; if (macvlan_passthru(vlan->port) && promisc) { int err; if (flags & MACVLAN_FLAG_NOPROMISC) err = dev_set_promiscuity(vlan->lowerdev, -1); else err = dev_set_promiscuity(vlan->lowerdev, 1); if (err < 0) return err; } vlan->flags = flags; } if (data && data[IFLA_MACVLAN_BC_QUEUE_LEN]) { vlan->bc_queue_len_req = nla_get_u32(data[IFLA_MACVLAN_BC_QUEUE_LEN]); update_port_bc_queue_len(vlan->port); } if (data && data[IFLA_MACVLAN_BC_CUTOFF]) update_port_bc_cutoff( vlan, nla_get_s32(data[IFLA_MACVLAN_BC_CUTOFF])); if (set_mode) vlan->mode = mode; if (data && data[IFLA_MACVLAN_MACADDR_MODE]) { if (vlan->mode != MACVLAN_MODE_SOURCE) return -EINVAL; macmode = nla_get_u32(data[IFLA_MACVLAN_MACADDR_MODE]); ret = macvlan_changelink_sources(vlan, macmode, data); if (ret) return ret; } return 0; } static size_t macvlan_get_size_mac(const struct macvlan_dev *vlan) { if (vlan->macaddr_count == 0) return 0; return nla_total_size(0) /* IFLA_MACVLAN_MACADDR_DATA */ + vlan->macaddr_count * nla_total_size(sizeof(u8) * ETH_ALEN); } static size_t macvlan_get_size(const struct net_device *dev) { struct macvlan_dev *vlan = netdev_priv(dev); return (0 + nla_total_size(4) /* IFLA_MACVLAN_MODE */ + nla_total_size(2) /* IFLA_MACVLAN_FLAGS */ + nla_total_size(4) /* IFLA_MACVLAN_MACADDR_COUNT */ + macvlan_get_size_mac(vlan) /* IFLA_MACVLAN_MACADDR */ + nla_total_size(4) /* IFLA_MACVLAN_BC_QUEUE_LEN */ + nla_total_size(4) /* IFLA_MACVLAN_BC_QUEUE_LEN_USED */ ); } static int macvlan_fill_info_macaddr(struct sk_buff *skb, const struct macvlan_dev *vlan, const int i) { struct hlist_head *h = &vlan->port->vlan_source_hash[i]; struct macvlan_source_entry *entry; hlist_for_each_entry_rcu(entry, h, hlist, lockdep_rtnl_is_held()) { if (entry->vlan != vlan) continue; if (nla_put(skb, IFLA_MACVLAN_MACADDR, ETH_ALEN, entry->addr)) return 1; } return 0; } static int macvlan_fill_info(struct sk_buff *skb, const struct net_device *dev) { struct macvlan_dev *vlan = netdev_priv(dev); struct macvlan_port *port = vlan->port; int i; struct nlattr *nest; if (nla_put_u32(skb, IFLA_MACVLAN_MODE, vlan->mode)) goto nla_put_failure; if (nla_put_u16(skb, IFLA_MACVLAN_FLAGS, vlan->flags)) goto nla_put_failure; if (nla_put_u32(skb, IFLA_MACVLAN_MACADDR_COUNT, vlan->macaddr_count)) goto nla_put_failure; if (vlan->macaddr_count > 0) { nest = nla_nest_start_noflag(skb, IFLA_MACVLAN_MACADDR_DATA); if (nest == NULL) goto nla_put_failure; for (i = 0; i < MACVLAN_HASH_SIZE; i++) { if (macvlan_fill_info_macaddr(skb, vlan, i)) goto nla_put_failure; } nla_nest_end(skb, nest); } if (nla_put_u32(skb, IFLA_MACVLAN_BC_QUEUE_LEN, vlan->bc_queue_len_req)) goto nla_put_failure; if (nla_put_u32(skb, IFLA_MACVLAN_BC_QUEUE_LEN_USED, port->bc_queue_len_used)) goto nla_put_failure; if (port->bc_cutoff != 1 && nla_put_s32(skb, IFLA_MACVLAN_BC_CUTOFF, port->bc_cutoff)) goto nla_put_failure; return 0; nla_put_failure: return -EMSGSIZE; } static const struct nla_policy macvlan_policy[IFLA_MACVLAN_MAX + 1] = { [IFLA_MACVLAN_MODE] = { .type = NLA_U32 }, 
[IFLA_MACVLAN_FLAGS] = { .type = NLA_U16 }, [IFLA_MACVLAN_MACADDR_MODE] = { .type = NLA_U32 }, [IFLA_MACVLAN_MACADDR] = { .type = NLA_BINARY, .len = MAX_ADDR_LEN }, [IFLA_MACVLAN_MACADDR_DATA] = { .type = NLA_NESTED }, [IFLA_MACVLAN_MACADDR_COUNT] = { .type = NLA_U32 }, [IFLA_MACVLAN_BC_QUEUE_LEN] = { .type = NLA_U32 }, [IFLA_MACVLAN_BC_QUEUE_LEN_USED] = { .type = NLA_REJECT }, [IFLA_MACVLAN_BC_CUTOFF] = { .type = NLA_S32 }, }; int macvlan_link_register(struct rtnl_link_ops *ops) { /* common fields */ ops->validate = macvlan_validate; ops->maxtype = IFLA_MACVLAN_MAX; ops->policy = macvlan_policy; ops->changelink = macvlan_changelink; ops->get_size = macvlan_get_size; ops->fill_info = macvlan_fill_info; return rtnl_link_register(ops); }; EXPORT_SYMBOL_GPL(macvlan_link_register); static struct net *macvlan_get_link_net(const struct net_device *dev) { return dev_net(macvlan_dev_real_dev(dev)); } static struct rtnl_link_ops macvlan_link_ops = { .kind = "macvlan", .setup = macvlan_setup, .newlink = macvlan_newlink, .dellink = macvlan_dellink, .get_link_net = macvlan_get_link_net, .priv_size = sizeof(struct macvlan_dev), }; static void update_port_bc_queue_len(struct macvlan_port *port) { u32 max_bc_queue_len_req = 0; struct macvlan_dev *vlan; list_for_each_entry(vlan, &port->vlans, list) { if (vlan->bc_queue_len_req > max_bc_queue_len_req) max_bc_queue_len_req = vlan->bc_queue_len_req; } port->bc_queue_len_used = max_bc_queue_len_req; } static int macvlan_device_event(struct notifier_block *unused, unsigned long event, void *ptr) { struct net_device *dev = netdev_notifier_info_to_dev(ptr); struct macvlan_dev *vlan, *next; struct macvlan_port *port; LIST_HEAD(list_kill); if (!netif_is_macvlan_port(dev)) return NOTIFY_DONE; port = macvlan_port_get_rtnl(dev); switch (event) { case NETDEV_UP: case NETDEV_DOWN: case NETDEV_CHANGE: list_for_each_entry(vlan, &port->vlans, list) netif_stacked_transfer_operstate(vlan->lowerdev, vlan->dev); break; case NETDEV_FEAT_CHANGE: list_for_each_entry(vlan, &port->vlans, list) { netif_inherit_tso_max(vlan->dev, dev); netdev_update_features(vlan->dev); } break; case NETDEV_CHANGEMTU: list_for_each_entry(vlan, &port->vlans, list) { if (vlan->dev->mtu <= dev->mtu) continue; dev_set_mtu(vlan->dev, dev->mtu); } break; case NETDEV_CHANGEADDR: if (!macvlan_passthru(port)) return NOTIFY_DONE; vlan = list_first_entry_or_null(&port->vlans, struct macvlan_dev, list); if (vlan && macvlan_sync_address(vlan->dev, dev->dev_addr)) return NOTIFY_BAD; break; case NETDEV_UNREGISTER: /* twiddle thumbs on netns device moves */ if (dev->reg_state != NETREG_UNREGISTERING) break; list_for_each_entry_safe(vlan, next, &port->vlans, list) vlan->dev->rtnl_link_ops->dellink(vlan->dev, &list_kill); unregister_netdevice_many(&list_kill); break; case NETDEV_PRE_TYPE_CHANGE: /* Forbid underlying device to change its type. 
*/ return NOTIFY_BAD; case NETDEV_NOTIFY_PEERS: case NETDEV_BONDING_FAILOVER: case NETDEV_RESEND_IGMP: /* Propagate to all vlans */ list_for_each_entry(vlan, &port->vlans, list) call_netdevice_notifiers(event, vlan->dev); } return NOTIFY_DONE; } static struct notifier_block macvlan_notifier_block __read_mostly = { .notifier_call = macvlan_device_event, }; static int __init macvlan_init_module(void) { int err; register_netdevice_notifier(&macvlan_notifier_block); err = macvlan_link_register(&macvlan_link_ops); if (err < 0) goto err1; return 0; err1: unregister_netdevice_notifier(&macvlan_notifier_block); return err; } static void __exit macvlan_cleanup_module(void) { rtnl_link_unregister(&macvlan_link_ops); unregister_netdevice_notifier(&macvlan_notifier_block); } module_init(macvlan_init_module); module_exit(macvlan_cleanup_module); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Patrick McHardy <[email protected]>"); MODULE_DESCRIPTION("Driver for MAC address based VLANs"); MODULE_ALIAS_RTNL_LINK("macvlan");
linux-master
drivers/net/macvlan.c
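The macvlan receive path above decides multicast/broadcast delivery by hashing the destination MAC into the per-port mc_filter/bc_filter bitmaps built by macvlan_compute_filter(), then testing a single bit per frame. As a hedged illustration of that bitmap-filter idea only — a standalone userspace sketch with a toy hash, not the kernel's mc_hash() or bitmap helpers — the following builds with any C99 compiler:

/* Toy userspace sketch of the multicast-filter idea used by
 * macvlan_compute_filter(): hash each subscribed address into a
 * 256-bit bitmap, then test one bit per incoming frame.
 * The hash below is NOT the kernel's mc_hash(); it only illustrates
 * the data structure.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define FILTER_BITS	256
#define LONG_BITS	(8 * sizeof(unsigned long))

static unsigned int toy_mc_hash(const uint8_t addr[6])
{
	unsigned int h = 5381;			/* djb2-style toy hash */

	for (int i = 0; i < 6; i++)
		h = h * 33 + addr[i];
	return h % FILTER_BITS;
}

static void filter_set(unsigned long *f, unsigned int bit)
{
	f[bit / LONG_BITS] |= 1UL << (bit % LONG_BITS);
}

static int filter_test(const unsigned long *f, unsigned int bit)
{
	return !!(f[bit / LONG_BITS] & (1UL << (bit % LONG_BITS)));
}

int main(void)
{
	unsigned long filter[FILTER_BITS / LONG_BITS];
	const uint8_t mdns[6]  = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0xfb };
	const uint8_t other[6] = { 0x01, 0x00, 0x5e, 0x01, 0x02, 0x03 };

	memset(filter, 0, sizeof(filter));
	filter_set(filter, toy_mc_hash(mdns));	/* "subscribe" to one group */

	printf("mdns accepted:  %d\n", filter_test(filter, toy_mc_hash(mdns)));
	printf("other accepted: %d\n", filter_test(filter, toy_mc_hash(other)));
	return 0;
}

The design choice this mirrors: when IFF_PROMISC or IFF_ALLMULTI is set, macvlan_compute_filter() simply fills the whole bitmap, so every multicast frame passes the bit test without consulting the address list.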
// SPDX-License-Identifier: GPL-2.0-or-later /* drivers/net/ifb.c: The purpose of this driver is to provide a device that allows for sharing of resources: 1) qdiscs/policies that are per device as opposed to system wide. ifb allows for a device which can be redirected to thus providing an impression of sharing. 2) Allows for queueing incoming traffic for shaping instead of dropping. The original concept is based on what is known as the IMQ driver initially written by Martin Devera, later rewritten by Patrick McHardy and then maintained by Andre Correa. You need the tc action mirror or redirect to feed this device packets. Authors: Jamal Hadi Salim (2005) */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/netdevice.h> #include <linux/ethtool.h> #include <linux/etherdevice.h> #include <linux/init.h> #include <linux/interrupt.h> #include <linux/moduleparam.h> #include <linux/netfilter_netdev.h> #include <net/pkt_sched.h> #include <net/net_namespace.h> #define TX_Q_LIMIT 32 struct ifb_q_stats { u64 packets; u64 bytes; struct u64_stats_sync sync; }; struct ifb_q_private { struct net_device *dev; struct tasklet_struct ifb_tasklet; int tasklet_pending; int txqnum; struct sk_buff_head rq; struct sk_buff_head tq; struct ifb_q_stats rx_stats; struct ifb_q_stats tx_stats; } ____cacheline_aligned_in_smp; struct ifb_dev_private { struct ifb_q_private *tx_private; }; /* For ethtools stats. */ struct ifb_q_stats_desc { char desc[ETH_GSTRING_LEN]; size_t offset; }; #define IFB_Q_STAT(m) offsetof(struct ifb_q_stats, m) static const struct ifb_q_stats_desc ifb_q_stats_desc[] = { { "packets", IFB_Q_STAT(packets) }, { "bytes", IFB_Q_STAT(bytes) }, }; #define IFB_Q_STATS_LEN ARRAY_SIZE(ifb_q_stats_desc) static netdev_tx_t ifb_xmit(struct sk_buff *skb, struct net_device *dev); static int ifb_open(struct net_device *dev); static int ifb_close(struct net_device *dev); static void ifb_update_q_stats(struct ifb_q_stats *stats, int len) { u64_stats_update_begin(&stats->sync); stats->packets++; stats->bytes += len; u64_stats_update_end(&stats->sync); } static void ifb_ri_tasklet(struct tasklet_struct *t) { struct ifb_q_private *txp = from_tasklet(txp, t, ifb_tasklet); struct netdev_queue *txq; struct sk_buff *skb; txq = netdev_get_tx_queue(txp->dev, txp->txqnum); skb = skb_peek(&txp->tq); if (!skb) { if (!__netif_tx_trylock(txq)) goto resched; skb_queue_splice_tail_init(&txp->rq, &txp->tq); __netif_tx_unlock(txq); } while ((skb = __skb_dequeue(&txp->tq)) != NULL) { /* Skip tc and netfilter to prevent redirection loop. 
*/ skb->redirected = 0; #ifdef CONFIG_NET_CLS_ACT skb->tc_skip_classify = 1; #endif nf_skip_egress(skb, true); ifb_update_q_stats(&txp->tx_stats, skb->len); rcu_read_lock(); skb->dev = dev_get_by_index_rcu(dev_net(txp->dev), skb->skb_iif); if (!skb->dev) { rcu_read_unlock(); dev_kfree_skb(skb); txp->dev->stats.tx_dropped++; if (skb_queue_len(&txp->tq) != 0) goto resched; break; } rcu_read_unlock(); skb->skb_iif = txp->dev->ifindex; if (!skb->from_ingress) { dev_queue_xmit(skb); } else { skb_pull_rcsum(skb, skb->mac_len); netif_receive_skb(skb); } } if (__netif_tx_trylock(txq)) { skb = skb_peek(&txp->rq); if (!skb) { txp->tasklet_pending = 0; if (netif_tx_queue_stopped(txq)) netif_tx_wake_queue(txq); } else { __netif_tx_unlock(txq); goto resched; } __netif_tx_unlock(txq); } else { resched: txp->tasklet_pending = 1; tasklet_schedule(&txp->ifb_tasklet); } } static void ifb_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats) { struct ifb_dev_private *dp = netdev_priv(dev); struct ifb_q_private *txp = dp->tx_private; unsigned int start; u64 packets, bytes; int i; for (i = 0; i < dev->num_tx_queues; i++,txp++) { do { start = u64_stats_fetch_begin(&txp->rx_stats.sync); packets = txp->rx_stats.packets; bytes = txp->rx_stats.bytes; } while (u64_stats_fetch_retry(&txp->rx_stats.sync, start)); stats->rx_packets += packets; stats->rx_bytes += bytes; do { start = u64_stats_fetch_begin(&txp->tx_stats.sync); packets = txp->tx_stats.packets; bytes = txp->tx_stats.bytes; } while (u64_stats_fetch_retry(&txp->tx_stats.sync, start)); stats->tx_packets += packets; stats->tx_bytes += bytes; } stats->rx_dropped = dev->stats.rx_dropped; stats->tx_dropped = dev->stats.tx_dropped; } static int ifb_dev_init(struct net_device *dev) { struct ifb_dev_private *dp = netdev_priv(dev); struct ifb_q_private *txp; int i; txp = kcalloc(dev->num_tx_queues, sizeof(*txp), GFP_KERNEL); if (!txp) return -ENOMEM; dp->tx_private = txp; for (i = 0; i < dev->num_tx_queues; i++,txp++) { txp->txqnum = i; txp->dev = dev; __skb_queue_head_init(&txp->rq); __skb_queue_head_init(&txp->tq); u64_stats_init(&txp->rx_stats.sync); u64_stats_init(&txp->tx_stats.sync); tasklet_setup(&txp->ifb_tasklet, ifb_ri_tasklet); netif_tx_start_queue(netdev_get_tx_queue(dev, i)); } return 0; } static void ifb_get_strings(struct net_device *dev, u32 stringset, u8 *buf) { u8 *p = buf; int i, j; switch (stringset) { case ETH_SS_STATS: for (i = 0; i < dev->real_num_rx_queues; i++) for (j = 0; j < IFB_Q_STATS_LEN; j++) ethtool_sprintf(&p, "rx_queue_%u_%.18s", i, ifb_q_stats_desc[j].desc); for (i = 0; i < dev->real_num_tx_queues; i++) for (j = 0; j < IFB_Q_STATS_LEN; j++) ethtool_sprintf(&p, "tx_queue_%u_%.18s", i, ifb_q_stats_desc[j].desc); break; } } static int ifb_get_sset_count(struct net_device *dev, int sset) { switch (sset) { case ETH_SS_STATS: return IFB_Q_STATS_LEN * (dev->real_num_rx_queues + dev->real_num_tx_queues); default: return -EOPNOTSUPP; } } static void ifb_fill_stats_data(u64 **data, struct ifb_q_stats *q_stats) { void *stats_base = (void *)q_stats; unsigned int start; size_t offset; int j; do { start = u64_stats_fetch_begin(&q_stats->sync); for (j = 0; j < IFB_Q_STATS_LEN; j++) { offset = ifb_q_stats_desc[j].offset; (*data)[j] = *(u64 *)(stats_base + offset); } } while (u64_stats_fetch_retry(&q_stats->sync, start)); *data += IFB_Q_STATS_LEN; } static void ifb_get_ethtool_stats(struct net_device *dev, struct ethtool_stats *stats, u64 *data) { struct ifb_dev_private *dp = netdev_priv(dev); struct ifb_q_private *txp; int i; for (i = 0; 
i < dev->real_num_rx_queues; i++) { txp = dp->tx_private + i; ifb_fill_stats_data(&data, &txp->rx_stats); } for (i = 0; i < dev->real_num_tx_queues; i++) { txp = dp->tx_private + i; ifb_fill_stats_data(&data, &txp->tx_stats); } } static const struct net_device_ops ifb_netdev_ops = { .ndo_open = ifb_open, .ndo_stop = ifb_close, .ndo_get_stats64 = ifb_stats64, .ndo_start_xmit = ifb_xmit, .ndo_validate_addr = eth_validate_addr, .ndo_init = ifb_dev_init, }; static const struct ethtool_ops ifb_ethtool_ops = { .get_strings = ifb_get_strings, .get_sset_count = ifb_get_sset_count, .get_ethtool_stats = ifb_get_ethtool_stats, }; #define IFB_FEATURES (NETIF_F_HW_CSUM | NETIF_F_SG | NETIF_F_FRAGLIST | \ NETIF_F_GSO_SOFTWARE | NETIF_F_GSO_ENCAP_ALL | \ NETIF_F_HIGHDMA | NETIF_F_HW_VLAN_CTAG_TX | \ NETIF_F_HW_VLAN_STAG_TX) static void ifb_dev_free(struct net_device *dev) { struct ifb_dev_private *dp = netdev_priv(dev); struct ifb_q_private *txp = dp->tx_private; int i; for (i = 0; i < dev->num_tx_queues; i++,txp++) { tasklet_kill(&txp->ifb_tasklet); __skb_queue_purge(&txp->rq); __skb_queue_purge(&txp->tq); } kfree(dp->tx_private); } static void ifb_setup(struct net_device *dev) { /* Initialize the device structure. */ dev->netdev_ops = &ifb_netdev_ops; dev->ethtool_ops = &ifb_ethtool_ops; /* Fill in device structure with ethernet-generic values. */ ether_setup(dev); dev->tx_queue_len = TX_Q_LIMIT; dev->features |= IFB_FEATURES; dev->hw_features |= dev->features; dev->hw_enc_features |= dev->features; dev->vlan_features |= IFB_FEATURES & ~(NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_STAG_TX); dev->flags |= IFF_NOARP; dev->flags &= ~IFF_MULTICAST; dev->priv_flags &= ~IFF_TX_SKB_SHARING; netif_keep_dst(dev); eth_hw_addr_random(dev); dev->needs_free_netdev = true; dev->priv_destructor = ifb_dev_free; dev->min_mtu = 0; dev->max_mtu = 0; } static netdev_tx_t ifb_xmit(struct sk_buff *skb, struct net_device *dev) { struct ifb_dev_private *dp = netdev_priv(dev); struct ifb_q_private *txp = dp->tx_private + skb_get_queue_mapping(skb); ifb_update_q_stats(&txp->rx_stats, skb->len); if (!skb->redirected || !skb->skb_iif) { dev_kfree_skb(skb); dev->stats.rx_dropped++; return NETDEV_TX_OK; } if (skb_queue_len(&txp->rq) >= dev->tx_queue_len) netif_tx_stop_queue(netdev_get_tx_queue(dev, txp->txqnum)); __skb_queue_tail(&txp->rq, skb); if (!txp->tasklet_pending) { txp->tasklet_pending = 1; tasklet_schedule(&txp->ifb_tasklet); } return NETDEV_TX_OK; } static int ifb_close(struct net_device *dev) { netif_tx_stop_all_queues(dev); return 0; } static int ifb_open(struct net_device *dev) { netif_tx_start_all_queues(dev); return 0; } static int ifb_validate(struct nlattr *tb[], struct nlattr *data[], struct netlink_ext_ack *extack) { if (tb[IFLA_ADDRESS]) { if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN) return -EINVAL; if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS]))) return -EADDRNOTAVAIL; } return 0; } static struct rtnl_link_ops ifb_link_ops __read_mostly = { .kind = "ifb", .priv_size = sizeof(struct ifb_dev_private), .setup = ifb_setup, .validate = ifb_validate, }; /* Number of ifb devices to be set up by this module. * Note that these legacy devices have one queue. 
* Prefer something like : ip link add ifb10 numtxqueues 8 type ifb */ static int numifbs = 2; module_param(numifbs, int, 0); MODULE_PARM_DESC(numifbs, "Number of ifb devices"); static int __init ifb_init_one(int index) { struct net_device *dev_ifb; int err; dev_ifb = alloc_netdev(sizeof(struct ifb_dev_private), "ifb%d", NET_NAME_UNKNOWN, ifb_setup); if (!dev_ifb) return -ENOMEM; dev_ifb->rtnl_link_ops = &ifb_link_ops; err = register_netdevice(dev_ifb); if (err < 0) goto err; return 0; err: free_netdev(dev_ifb); return err; } static int __init ifb_init_module(void) { int i, err; down_write(&pernet_ops_rwsem); rtnl_lock(); err = __rtnl_link_register(&ifb_link_ops); if (err < 0) goto out; for (i = 0; i < numifbs && !err; i++) { err = ifb_init_one(i); cond_resched(); } if (err) __rtnl_link_unregister(&ifb_link_ops); out: rtnl_unlock(); up_write(&pernet_ops_rwsem); return err; } static void __exit ifb_cleanup_module(void) { rtnl_link_unregister(&ifb_link_ops); } module_init(ifb_init_module); module_exit(ifb_cleanup_module); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Jamal Hadi Salim"); MODULE_ALIAS_RTNL_LINK("ifb");
linux-master
drivers/net/ifb.c
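Both ifb_stats64() and macvlan_dev_get_stats64() above read 64-bit per-queue/per-CPU counters with the u64_stats_fetch_begin()/u64_stats_fetch_retry() loop. The sketch below is a hedged userspace approximation of that seqcount read pattern (toy types, not the kernel API) showing why the reader retries instead of taking a lock:

/* Toy approximation of the u64_stats/seqcount read pattern used by
 * ifb_stats64(): the writer bumps a sequence number around its update,
 * and the reader retries until it observes an even, unchanged sequence,
 * i.e. a consistent {packets, bytes} snapshot.
 * Illustrative only; a real concurrent version also needs the data
 * fields themselves to be accessed with proper atomics/barriers.
 */
#include <stdatomic.h>
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

struct q_stats {
	_Atomic unsigned int seq;	/* odd while an update is in flight */
	uint64_t packets;
	uint64_t bytes;
};

static void writer_update(struct q_stats *s, uint64_t len)
{
	atomic_fetch_add(&s->seq, 1);	/* begin: sequence becomes odd */
	s->packets++;
	s->bytes += len;
	atomic_fetch_add(&s->seq, 1);	/* end: sequence becomes even again */
}

static void reader_snapshot(struct q_stats *s, uint64_t *pkts, uint64_t *bytes)
{
	unsigned int start;

	do {
		start = atomic_load(&s->seq);
		*pkts = s->packets;
		*bytes = s->bytes;
	} while ((start & 1) || start != atomic_load(&s->seq));
}

int main(void)
{
	struct q_stats s = { 0 };
	uint64_t pkts, bytes;

	writer_update(&s, 1500);
	writer_update(&s, 60);
	reader_snapshot(&s, &pkts, &bytes);
	printf("packets=%" PRIu64 " bytes=%" PRIu64 "\n", pkts, bytes);
	return 0;
}

This also explains why rx_errors and tx_dropped in macvlan_dev_get_stats64() are summed outside the retry loop: as the in-code comment notes, they are plain 32-bit counters read with READ_ONCE() rather than snapshotted under the sequence counter.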
// SPDX-License-Identifier: GPL-2.0 /* Copyright (c) 2018, Intel Corporation. */ /* This provides a net_failover interface for paravirtual drivers to * provide an alternate datapath by exporting APIs to create and * destroy a upper 'net_failover' netdev. The upper dev manages the * original paravirtual interface as a 'standby' netdev and uses the * generic failover infrastructure to register and manage a direct * attached VF as a 'primary' netdev. This enables live migration of * a VM with direct attached VF by failing over to the paravirtual * datapath when the VF is unplugged. * * Some of the netdev management routines are based on bond/team driver as * this driver provides active-backup functionality similar to those drivers. */ #include <linux/netdevice.h> #include <linux/etherdevice.h> #include <linux/ethtool.h> #include <linux/module.h> #include <linux/slab.h> #include <linux/netpoll.h> #include <linux/rtnetlink.h> #include <linux/if_vlan.h> #include <linux/pci.h> #include <net/sch_generic.h> #include <uapi/linux/if_arp.h> #include <net/net_failover.h> static bool net_failover_xmit_ready(struct net_device *dev) { return netif_running(dev) && netif_carrier_ok(dev); } static int net_failover_open(struct net_device *dev) { struct net_failover_info *nfo_info = netdev_priv(dev); struct net_device *primary_dev, *standby_dev; int err; primary_dev = rtnl_dereference(nfo_info->primary_dev); if (primary_dev) { err = dev_open(primary_dev, NULL); if (err) goto err_primary_open; } standby_dev = rtnl_dereference(nfo_info->standby_dev); if (standby_dev) { err = dev_open(standby_dev, NULL); if (err) goto err_standby_open; } if ((primary_dev && net_failover_xmit_ready(primary_dev)) || (standby_dev && net_failover_xmit_ready(standby_dev))) { netif_carrier_on(dev); netif_tx_wake_all_queues(dev); } return 0; err_standby_open: if (primary_dev) dev_close(primary_dev); err_primary_open: netif_tx_disable(dev); return err; } static int net_failover_close(struct net_device *dev) { struct net_failover_info *nfo_info = netdev_priv(dev); struct net_device *slave_dev; netif_tx_disable(dev); slave_dev = rtnl_dereference(nfo_info->primary_dev); if (slave_dev) dev_close(slave_dev); slave_dev = rtnl_dereference(nfo_info->standby_dev); if (slave_dev) dev_close(slave_dev); return 0; } static netdev_tx_t net_failover_drop_xmit(struct sk_buff *skb, struct net_device *dev) { dev_core_stats_tx_dropped_inc(dev); dev_kfree_skb_any(skb); return NETDEV_TX_OK; } static netdev_tx_t net_failover_start_xmit(struct sk_buff *skb, struct net_device *dev) { struct net_failover_info *nfo_info = netdev_priv(dev); struct net_device *xmit_dev; /* Try xmit via primary netdev followed by standby netdev */ xmit_dev = rcu_dereference_bh(nfo_info->primary_dev); if (!xmit_dev || !net_failover_xmit_ready(xmit_dev)) { xmit_dev = rcu_dereference_bh(nfo_info->standby_dev); if (!xmit_dev || !net_failover_xmit_ready(xmit_dev)) return net_failover_drop_xmit(skb, dev); } skb->dev = xmit_dev; skb->queue_mapping = qdisc_skb_cb(skb)->slave_dev_queue_mapping; return dev_queue_xmit(skb); } static u16 net_failover_select_queue(struct net_device *dev, struct sk_buff *skb, struct net_device *sb_dev) { struct net_failover_info *nfo_info = netdev_priv(dev); struct net_device *primary_dev; u16 txq; primary_dev = rcu_dereference(nfo_info->primary_dev); if (primary_dev) { const struct net_device_ops *ops = primary_dev->netdev_ops; if (ops->ndo_select_queue) txq = ops->ndo_select_queue(primary_dev, skb, sb_dev); else txq = netdev_pick_tx(primary_dev, skb, NULL); } 
else { txq = skb_rx_queue_recorded(skb) ? skb_get_rx_queue(skb) : 0; } /* Save the original txq to restore before passing to the driver */ qdisc_skb_cb(skb)->slave_dev_queue_mapping = skb->queue_mapping; if (unlikely(txq >= dev->real_num_tx_queues)) { do { txq -= dev->real_num_tx_queues; } while (txq >= dev->real_num_tx_queues); } return txq; } /* fold stats, assuming all rtnl_link_stats64 fields are u64, but * that some drivers can provide 32bit values only. */ static void net_failover_fold_stats(struct rtnl_link_stats64 *_res, const struct rtnl_link_stats64 *_new, const struct rtnl_link_stats64 *_old) { const u64 *new = (const u64 *)_new; const u64 *old = (const u64 *)_old; u64 *res = (u64 *)_res; int i; for (i = 0; i < sizeof(*_res) / sizeof(u64); i++) { u64 nv = new[i]; u64 ov = old[i]; s64 delta = nv - ov; /* detects if this particular field is 32bit only */ if (((nv | ov) >> 32) == 0) delta = (s64)(s32)((u32)nv - (u32)ov); /* filter anomalies, some drivers reset their stats * at down/up events. */ if (delta > 0) res[i] += delta; } } static void net_failover_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats) { struct net_failover_info *nfo_info = netdev_priv(dev); const struct rtnl_link_stats64 *new; struct rtnl_link_stats64 temp; struct net_device *slave_dev; spin_lock(&nfo_info->stats_lock); memcpy(stats, &nfo_info->failover_stats, sizeof(*stats)); rcu_read_lock(); slave_dev = rcu_dereference(nfo_info->primary_dev); if (slave_dev) { new = dev_get_stats(slave_dev, &temp); net_failover_fold_stats(stats, new, &nfo_info->primary_stats); memcpy(&nfo_info->primary_stats, new, sizeof(*new)); } slave_dev = rcu_dereference(nfo_info->standby_dev); if (slave_dev) { new = dev_get_stats(slave_dev, &temp); net_failover_fold_stats(stats, new, &nfo_info->standby_stats); memcpy(&nfo_info->standby_stats, new, sizeof(*new)); } rcu_read_unlock(); memcpy(&nfo_info->failover_stats, stats, sizeof(*stats)); spin_unlock(&nfo_info->stats_lock); } static int net_failover_change_mtu(struct net_device *dev, int new_mtu) { struct net_failover_info *nfo_info = netdev_priv(dev); struct net_device *primary_dev, *standby_dev; int ret = 0; primary_dev = rtnl_dereference(nfo_info->primary_dev); if (primary_dev) { ret = dev_set_mtu(primary_dev, new_mtu); if (ret) return ret; } standby_dev = rtnl_dereference(nfo_info->standby_dev); if (standby_dev) { ret = dev_set_mtu(standby_dev, new_mtu); if (ret) { if (primary_dev) dev_set_mtu(primary_dev, dev->mtu); return ret; } } dev->mtu = new_mtu; return 0; } static void net_failover_set_rx_mode(struct net_device *dev) { struct net_failover_info *nfo_info = netdev_priv(dev); struct net_device *slave_dev; rcu_read_lock(); slave_dev = rcu_dereference(nfo_info->primary_dev); if (slave_dev) { dev_uc_sync_multiple(slave_dev, dev); dev_mc_sync_multiple(slave_dev, dev); } slave_dev = rcu_dereference(nfo_info->standby_dev); if (slave_dev) { dev_uc_sync_multiple(slave_dev, dev); dev_mc_sync_multiple(slave_dev, dev); } rcu_read_unlock(); } static int net_failover_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid) { struct net_failover_info *nfo_info = netdev_priv(dev); struct net_device *primary_dev, *standby_dev; int ret = 0; primary_dev = rcu_dereference(nfo_info->primary_dev); if (primary_dev) { ret = vlan_vid_add(primary_dev, proto, vid); if (ret) return ret; } standby_dev = rcu_dereference(nfo_info->standby_dev); if (standby_dev) { ret = vlan_vid_add(standby_dev, proto, vid); if (ret) if (primary_dev) vlan_vid_del(primary_dev, proto, vid); } return ret; } 
static int net_failover_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, u16 vid) { struct net_failover_info *nfo_info = netdev_priv(dev); struct net_device *slave_dev; slave_dev = rcu_dereference(nfo_info->primary_dev); if (slave_dev) vlan_vid_del(slave_dev, proto, vid); slave_dev = rcu_dereference(nfo_info->standby_dev); if (slave_dev) vlan_vid_del(slave_dev, proto, vid); return 0; } static const struct net_device_ops failover_dev_ops = { .ndo_open = net_failover_open, .ndo_stop = net_failover_close, .ndo_start_xmit = net_failover_start_xmit, .ndo_select_queue = net_failover_select_queue, .ndo_get_stats64 = net_failover_get_stats, .ndo_change_mtu = net_failover_change_mtu, .ndo_set_rx_mode = net_failover_set_rx_mode, .ndo_vlan_rx_add_vid = net_failover_vlan_rx_add_vid, .ndo_vlan_rx_kill_vid = net_failover_vlan_rx_kill_vid, .ndo_validate_addr = eth_validate_addr, .ndo_features_check = passthru_features_check, }; #define FAILOVER_NAME "net_failover" #define FAILOVER_VERSION "0.1" static void nfo_ethtool_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *drvinfo) { strscpy(drvinfo->driver, FAILOVER_NAME, sizeof(drvinfo->driver)); strscpy(drvinfo->version, FAILOVER_VERSION, sizeof(drvinfo->version)); } static int nfo_ethtool_get_link_ksettings(struct net_device *dev, struct ethtool_link_ksettings *cmd) { struct net_failover_info *nfo_info = netdev_priv(dev); struct net_device *slave_dev; slave_dev = rtnl_dereference(nfo_info->primary_dev); if (!slave_dev || !net_failover_xmit_ready(slave_dev)) { slave_dev = rtnl_dereference(nfo_info->standby_dev); if (!slave_dev || !net_failover_xmit_ready(slave_dev)) { cmd->base.duplex = DUPLEX_UNKNOWN; cmd->base.port = PORT_OTHER; cmd->base.speed = SPEED_UNKNOWN; return 0; } } return __ethtool_get_link_ksettings(slave_dev, cmd); } static const struct ethtool_ops failover_ethtool_ops = { .get_drvinfo = nfo_ethtool_get_drvinfo, .get_link = ethtool_op_get_link, .get_link_ksettings = nfo_ethtool_get_link_ksettings, }; /* Called when slave dev is injecting data into network stack. * Change the associated network device from lower dev to failover dev. 
* note: already called with rcu_read_lock */ static rx_handler_result_t net_failover_handle_frame(struct sk_buff **pskb) { struct sk_buff *skb = *pskb; struct net_device *dev = rcu_dereference(skb->dev->rx_handler_data); struct net_failover_info *nfo_info = netdev_priv(dev); struct net_device *primary_dev, *standby_dev; primary_dev = rcu_dereference(nfo_info->primary_dev); standby_dev = rcu_dereference(nfo_info->standby_dev); if (primary_dev && skb->dev == standby_dev) return RX_HANDLER_EXACT; skb->dev = dev; return RX_HANDLER_ANOTHER; } static void net_failover_compute_features(struct net_device *dev) { netdev_features_t vlan_features = FAILOVER_VLAN_FEATURES & NETIF_F_ALL_FOR_ALL; netdev_features_t enc_features = FAILOVER_ENC_FEATURES; unsigned short max_hard_header_len = ETH_HLEN; unsigned int dst_release_flag = IFF_XMIT_DST_RELEASE | IFF_XMIT_DST_RELEASE_PERM; struct net_failover_info *nfo_info = netdev_priv(dev); struct net_device *primary_dev, *standby_dev; primary_dev = rcu_dereference(nfo_info->primary_dev); if (primary_dev) { vlan_features = netdev_increment_features(vlan_features, primary_dev->vlan_features, FAILOVER_VLAN_FEATURES); enc_features = netdev_increment_features(enc_features, primary_dev->hw_enc_features, FAILOVER_ENC_FEATURES); dst_release_flag &= primary_dev->priv_flags; if (primary_dev->hard_header_len > max_hard_header_len) max_hard_header_len = primary_dev->hard_header_len; } standby_dev = rcu_dereference(nfo_info->standby_dev); if (standby_dev) { vlan_features = netdev_increment_features(vlan_features, standby_dev->vlan_features, FAILOVER_VLAN_FEATURES); enc_features = netdev_increment_features(enc_features, standby_dev->hw_enc_features, FAILOVER_ENC_FEATURES); dst_release_flag &= standby_dev->priv_flags; if (standby_dev->hard_header_len > max_hard_header_len) max_hard_header_len = standby_dev->hard_header_len; } dev->vlan_features = vlan_features; dev->hw_enc_features = enc_features | NETIF_F_GSO_ENCAP_ALL; dev->hard_header_len = max_hard_header_len; dev->priv_flags &= ~IFF_XMIT_DST_RELEASE; if (dst_release_flag == (IFF_XMIT_DST_RELEASE | IFF_XMIT_DST_RELEASE_PERM)) dev->priv_flags |= IFF_XMIT_DST_RELEASE; netdev_change_features(dev); } static void net_failover_lower_state_changed(struct net_device *slave_dev, struct net_device *primary_dev, struct net_device *standby_dev) { struct netdev_lag_lower_state_info info; if (netif_carrier_ok(slave_dev)) info.link_up = true; else info.link_up = false; if (slave_dev == primary_dev) { if (netif_running(primary_dev)) info.tx_enabled = true; else info.tx_enabled = false; } else { if ((primary_dev && netif_running(primary_dev)) || (!netif_running(standby_dev))) info.tx_enabled = false; else info.tx_enabled = true; } netdev_lower_state_changed(slave_dev, &info); } static int net_failover_slave_pre_register(struct net_device *slave_dev, struct net_device *failover_dev) { struct net_device *standby_dev, *primary_dev; struct net_failover_info *nfo_info; bool slave_is_standby; nfo_info = netdev_priv(failover_dev); standby_dev = rtnl_dereference(nfo_info->standby_dev); primary_dev = rtnl_dereference(nfo_info->primary_dev); slave_is_standby = slave_dev->dev.parent == failover_dev->dev.parent; if (slave_is_standby ? standby_dev : primary_dev) { netdev_err(failover_dev, "%s attempting to register as slave dev when %s already present\n", slave_dev->name, slave_is_standby ? "standby" : "primary"); return -EINVAL; } /* We want to allow only a direct attached VF device as a primary * netdev. 
As there is no easy way to check for a VF device, restrict * this to a pci device. */ if (!slave_is_standby && (!slave_dev->dev.parent || !dev_is_pci(slave_dev->dev.parent))) return -EINVAL; if (failover_dev->features & NETIF_F_VLAN_CHALLENGED && vlan_uses_dev(failover_dev)) { netdev_err(failover_dev, "Device %s is VLAN challenged and failover device has VLAN set up\n", failover_dev->name); return -EINVAL; } return 0; } static int net_failover_slave_register(struct net_device *slave_dev, struct net_device *failover_dev) { struct net_device *standby_dev, *primary_dev; struct net_failover_info *nfo_info; bool slave_is_standby; u32 orig_mtu; int err; /* Align MTU of slave with failover dev */ orig_mtu = slave_dev->mtu; err = dev_set_mtu(slave_dev, failover_dev->mtu); if (err) { netdev_err(failover_dev, "unable to change mtu of %s to %u register failed\n", slave_dev->name, failover_dev->mtu); goto done; } dev_hold(slave_dev); if (netif_running(failover_dev)) { err = dev_open(slave_dev, NULL); if (err && (err != -EBUSY)) { netdev_err(failover_dev, "Opening slave %s failed err:%d\n", slave_dev->name, err); goto err_dev_open; } } netif_addr_lock_bh(failover_dev); dev_uc_sync_multiple(slave_dev, failover_dev); dev_mc_sync_multiple(slave_dev, failover_dev); netif_addr_unlock_bh(failover_dev); err = vlan_vids_add_by_dev(slave_dev, failover_dev); if (err) { netdev_err(failover_dev, "Failed to add vlan ids to device %s err:%d\n", slave_dev->name, err); goto err_vlan_add; } nfo_info = netdev_priv(failover_dev); standby_dev = rtnl_dereference(nfo_info->standby_dev); primary_dev = rtnl_dereference(nfo_info->primary_dev); slave_is_standby = slave_dev->dev.parent == failover_dev->dev.parent; if (slave_is_standby) { rcu_assign_pointer(nfo_info->standby_dev, slave_dev); standby_dev = slave_dev; dev_get_stats(standby_dev, &nfo_info->standby_stats); } else { rcu_assign_pointer(nfo_info->primary_dev, slave_dev); primary_dev = slave_dev; dev_get_stats(primary_dev, &nfo_info->primary_stats); failover_dev->min_mtu = slave_dev->min_mtu; failover_dev->max_mtu = slave_dev->max_mtu; } net_failover_lower_state_changed(slave_dev, primary_dev, standby_dev); net_failover_compute_features(failover_dev); call_netdevice_notifiers(NETDEV_JOIN, slave_dev); netdev_info(failover_dev, "failover %s slave:%s registered\n", slave_is_standby ? 
"standby" : "primary", slave_dev->name); return 0; err_vlan_add: dev_uc_unsync(slave_dev, failover_dev); dev_mc_unsync(slave_dev, failover_dev); dev_close(slave_dev); err_dev_open: dev_put(slave_dev); dev_set_mtu(slave_dev, orig_mtu); done: return err; } static int net_failover_slave_pre_unregister(struct net_device *slave_dev, struct net_device *failover_dev) { struct net_device *standby_dev, *primary_dev; struct net_failover_info *nfo_info; nfo_info = netdev_priv(failover_dev); primary_dev = rtnl_dereference(nfo_info->primary_dev); standby_dev = rtnl_dereference(nfo_info->standby_dev); if (slave_dev != primary_dev && slave_dev != standby_dev) return -ENODEV; return 0; } static int net_failover_slave_unregister(struct net_device *slave_dev, struct net_device *failover_dev) { struct net_device *standby_dev, *primary_dev; struct net_failover_info *nfo_info; bool slave_is_standby; nfo_info = netdev_priv(failover_dev); primary_dev = rtnl_dereference(nfo_info->primary_dev); standby_dev = rtnl_dereference(nfo_info->standby_dev); if (WARN_ON_ONCE(slave_dev != primary_dev && slave_dev != standby_dev)) return -ENODEV; vlan_vids_del_by_dev(slave_dev, failover_dev); dev_uc_unsync(slave_dev, failover_dev); dev_mc_unsync(slave_dev, failover_dev); dev_close(slave_dev); nfo_info = netdev_priv(failover_dev); dev_get_stats(failover_dev, &nfo_info->failover_stats); slave_is_standby = slave_dev->dev.parent == failover_dev->dev.parent; if (slave_is_standby) { RCU_INIT_POINTER(nfo_info->standby_dev, NULL); } else { RCU_INIT_POINTER(nfo_info->primary_dev, NULL); if (standby_dev) { failover_dev->min_mtu = standby_dev->min_mtu; failover_dev->max_mtu = standby_dev->max_mtu; } } dev_put(slave_dev); net_failover_compute_features(failover_dev); netdev_info(failover_dev, "failover %s slave:%s unregistered\n", slave_is_standby ? "standby" : "primary", slave_dev->name); return 0; } static int net_failover_slave_link_change(struct net_device *slave_dev, struct net_device *failover_dev) { struct net_device *primary_dev, *standby_dev; struct net_failover_info *nfo_info; nfo_info = netdev_priv(failover_dev); primary_dev = rtnl_dereference(nfo_info->primary_dev); standby_dev = rtnl_dereference(nfo_info->standby_dev); if (slave_dev != primary_dev && slave_dev != standby_dev) return -ENODEV; if ((primary_dev && net_failover_xmit_ready(primary_dev)) || (standby_dev && net_failover_xmit_ready(standby_dev))) { netif_carrier_on(failover_dev); netif_tx_wake_all_queues(failover_dev); } else { dev_get_stats(failover_dev, &nfo_info->failover_stats); netif_carrier_off(failover_dev); netif_tx_stop_all_queues(failover_dev); } net_failover_lower_state_changed(slave_dev, primary_dev, standby_dev); return 0; } static int net_failover_slave_name_change(struct net_device *slave_dev, struct net_device *failover_dev) { struct net_device *primary_dev, *standby_dev; struct net_failover_info *nfo_info; nfo_info = netdev_priv(failover_dev); primary_dev = rtnl_dereference(nfo_info->primary_dev); standby_dev = rtnl_dereference(nfo_info->standby_dev); if (slave_dev != primary_dev && slave_dev != standby_dev) return -ENODEV; /* We need to bring up the slave after the rename by udev in case * open failed with EBUSY when it was registered. 
*/ dev_open(slave_dev, NULL); return 0; } static struct failover_ops net_failover_ops = { .slave_pre_register = net_failover_slave_pre_register, .slave_register = net_failover_slave_register, .slave_pre_unregister = net_failover_slave_pre_unregister, .slave_unregister = net_failover_slave_unregister, .slave_link_change = net_failover_slave_link_change, .slave_name_change = net_failover_slave_name_change, .slave_handle_frame = net_failover_handle_frame, }; /** * net_failover_create - Create and register a failover instance * * @standby_dev: standby netdev * * Creates a failover netdev and registers a failover instance for a standby * netdev. Used by paravirtual drivers that use 3-netdev model. * The failover netdev acts as a master device and controls 2 slave devices - * the original standby netdev and a VF netdev with the same MAC gets * registered as primary netdev. * * Return: pointer to failover instance */ struct failover *net_failover_create(struct net_device *standby_dev) { struct device *dev = standby_dev->dev.parent; struct net_device *failover_dev; struct failover *failover; int err; /* Alloc at least 2 queues, for now we are going with 16 assuming * that VF devices being enslaved won't have too many queues. */ failover_dev = alloc_etherdev_mq(sizeof(struct net_failover_info), 16); if (!failover_dev) { dev_err(dev, "Unable to allocate failover_netdev!\n"); return ERR_PTR(-ENOMEM); } dev_net_set(failover_dev, dev_net(standby_dev)); SET_NETDEV_DEV(failover_dev, dev); failover_dev->netdev_ops = &failover_dev_ops; failover_dev->ethtool_ops = &failover_ethtool_ops; /* Initialize the device options */ failover_dev->priv_flags |= IFF_UNICAST_FLT | IFF_NO_QUEUE; failover_dev->priv_flags &= ~(IFF_XMIT_DST_RELEASE | IFF_TX_SKB_SHARING); /* don't acquire failover netdev's netif_tx_lock when transmitting */ failover_dev->features |= NETIF_F_LLTX; /* Don't allow failover devices to change network namespaces. */ failover_dev->features |= NETIF_F_NETNS_LOCAL; failover_dev->hw_features = FAILOVER_VLAN_FEATURES | NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_FILTER; failover_dev->hw_features |= NETIF_F_GSO_ENCAP_ALL; failover_dev->features |= failover_dev->hw_features; dev_addr_set(failover_dev, standby_dev->dev_addr); failover_dev->min_mtu = standby_dev->min_mtu; failover_dev->max_mtu = standby_dev->max_mtu; err = register_netdev(failover_dev); if (err) { dev_err(dev, "Unable to register failover_dev!\n"); goto err_register_netdev; } netif_carrier_off(failover_dev); failover = failover_register(failover_dev, &net_failover_ops); if (IS_ERR(failover)) { err = PTR_ERR(failover); goto err_failover_register; } return failover; err_failover_register: unregister_netdev(failover_dev); err_register_netdev: free_netdev(failover_dev); return ERR_PTR(err); } EXPORT_SYMBOL_GPL(net_failover_create); /** * net_failover_destroy - Destroy a failover instance * * @failover: pointer to failover instance * * Unregisters any slave netdevs associated with the failover instance by * calling failover_slave_unregister(). * unregisters the failover instance itself and finally frees the failover * netdev. Used by paravirtual drivers that use 3-netdev model. 
* */ void net_failover_destroy(struct failover *failover) { struct net_failover_info *nfo_info; struct net_device *failover_dev; struct net_device *slave_dev; if (!failover) return; failover_dev = rcu_dereference(failover->failover_dev); nfo_info = netdev_priv(failover_dev); netif_device_detach(failover_dev); rtnl_lock(); slave_dev = rtnl_dereference(nfo_info->primary_dev); if (slave_dev) failover_slave_unregister(slave_dev); slave_dev = rtnl_dereference(nfo_info->standby_dev); if (slave_dev) failover_slave_unregister(slave_dev); failover_unregister(failover); unregister_netdevice(failover_dev); rtnl_unlock(); free_netdev(failover_dev); } EXPORT_SYMBOL_GPL(net_failover_destroy); static __init int net_failover_init(void) { return 0; } module_init(net_failover_init); static __exit void net_failover_exit(void) { } module_exit(net_failover_exit); MODULE_DESCRIPTION("Failover driver for Paravirtual drivers"); MODULE_LICENSE("GPL v2");
linux-master
drivers/net/net_failover.c
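The failover driver above hinges on two small decisions: net_failover_handle_frame() returns RX_HANDLER_EXACT for traffic seen on the standby path while a primary VF is registered, and net_failover_lower_state_changed() only reports the standby slave as tx-enabled when no running primary exists. Below is a minimal userspace sketch of just that decision logic; the struct, enum, and main() scenarios are illustrative stand-ins, not kernel types.

/* Standalone model of two net_failover decisions; the types and the
 * main() scenarios are illustrative, not kernel definitions. */
#include <stdbool.h>
#include <stdio.h>

enum rx_verdict { RX_PASS_UP_AS_FAILOVER, RX_EXACT_ONLY };

struct model {
	bool has_primary;     /* VF (primary) slave registered */
	bool primary_running; /* netif_running(primary) */
	bool standby_running; /* netif_running(standby) */
};

/* Mirrors net_failover_handle_frame(): while a primary VF is present,
 * frames seen on the standby path are not re-delivered through the
 * failover netdev. */
static enum rx_verdict rx_decision(const struct model *m, bool frame_on_standby)
{
	if (m->has_primary && frame_on_standby)
		return RX_EXACT_ONLY;
	return RX_PASS_UP_AS_FAILOVER;
}

/* Mirrors the tx_enabled choice in net_failover_lower_state_changed():
 * the standby slave only transmits when no running primary exists. */
static bool standby_tx_enabled(const struct model *m)
{
	if ((m->has_primary && m->primary_running) || !m->standby_running)
		return false;
	return true;
}

int main(void)
{
	struct model m = { .has_primary = true, .primary_running = true,
			   .standby_running = true };

	printf("frame on standby -> %s\n",
	       rx_decision(&m, true) == RX_EXACT_ONLY ? "exact only" : "failover");
	printf("standby tx enabled: %d\n", standby_tx_enabled(&m));

	m.primary_running = false;	/* VF went down: standby takes over */
	printf("standby tx enabled after VF down: %d\n", standby_tx_enabled(&m));
	return 0;
}

Keeping the standby path quiet while a running primary exists is what lets the VF carry the datapath, with the standby taking over only once the VF is gone.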
// SPDX-License-Identifier: GPL-2.0-or-later /* * linux/drivers/net/netconsole.c * * Copyright (C) 2001 Ingo Molnar <[email protected]> * * This file contains the implementation of an IRQ-safe, crash-safe * kernel console implementation that outputs kernel messages to the * network. * * Modification history: * * 2001-09-17 started by Ingo Molnar. * 2003-08-11 2.6 port by Matt Mackall * simplified options * generic card hooks * works non-modular * 2003-09-07 rewritten with netpoll api */ /**************************************************************** * ****************************************************************/ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/mm.h> #include <linux/init.h> #include <linux/module.h> #include <linux/slab.h> #include <linux/console.h> #include <linux/moduleparam.h> #include <linux/kernel.h> #include <linux/string.h> #include <linux/netpoll.h> #include <linux/inet.h> #include <linux/configfs.h> #include <linux/etherdevice.h> #include <linux/utsname.h> MODULE_AUTHOR("Maintainer: Matt Mackall <[email protected]>"); MODULE_DESCRIPTION("Console driver for network interfaces"); MODULE_LICENSE("GPL"); #define MAX_PARAM_LENGTH 256 #define MAX_PRINT_CHUNK 1000 static char config[MAX_PARAM_LENGTH]; module_param_string(netconsole, config, MAX_PARAM_LENGTH, 0); MODULE_PARM_DESC(netconsole, " netconsole=[src-port]@[src-ip]/[dev],[tgt-port]@<tgt-ip>/[tgt-macaddr]"); static bool oops_only = false; module_param(oops_only, bool, 0600); MODULE_PARM_DESC(oops_only, "Only log oops messages"); #ifndef MODULE static int __init option_setup(char *opt) { strscpy(config, opt, MAX_PARAM_LENGTH); return 1; } __setup("netconsole=", option_setup); #endif /* MODULE */ /* Linked list of all configured targets */ static LIST_HEAD(target_list); /* This needs to be a spinlock because write_msg() cannot sleep */ static DEFINE_SPINLOCK(target_list_lock); /* * Console driver for extended netconsoles. Registered on the first use to * avoid unnecessarily enabling ext message formatting. */ static struct console netconsole_ext; /** * struct netconsole_target - Represents a configured netconsole target. * @list: Links this target into the target_list. * @item: Links us into the configfs subsystem hierarchy. * @enabled: On / off knob to enable / disable target. * Visible from userspace (read-write). * We maintain a strict 1:1 correspondence between this and * whether the corresponding netpoll is active or inactive. * Also, other parameters of a target may be modified at * runtime only when it is disabled (enabled == 0). * @extended: Denotes whether console is extended or not. * @release: Denotes whether kernel release version should be prepended * to the message. Depends on extended console. * @np: The netpoll structure for this target. 
* Contains the other userspace visible parameters: * dev_name (read-write) * local_port (read-write) * remote_port (read-write) * local_ip (read-write) * remote_ip (read-write) * local_mac (read-only) * remote_mac (read-write) */ struct netconsole_target { struct list_head list; #ifdef CONFIG_NETCONSOLE_DYNAMIC struct config_item item; #endif bool enabled; bool extended; bool release; struct netpoll np; }; #ifdef CONFIG_NETCONSOLE_DYNAMIC static struct configfs_subsystem netconsole_subsys; static DEFINE_MUTEX(dynamic_netconsole_mutex); static int __init dynamic_netconsole_init(void) { config_group_init(&netconsole_subsys.su_group); mutex_init(&netconsole_subsys.su_mutex); return configfs_register_subsystem(&netconsole_subsys); } static void __exit dynamic_netconsole_exit(void) { configfs_unregister_subsystem(&netconsole_subsys); } /* * Targets that were created by parsing the boot/module option string * do not exist in the configfs hierarchy (and have NULL names) and will * never go away, so make these a no-op for them. */ static void netconsole_target_get(struct netconsole_target *nt) { if (config_item_name(&nt->item)) config_item_get(&nt->item); } static void netconsole_target_put(struct netconsole_target *nt) { if (config_item_name(&nt->item)) config_item_put(&nt->item); } #else /* !CONFIG_NETCONSOLE_DYNAMIC */ static int __init dynamic_netconsole_init(void) { return 0; } static void __exit dynamic_netconsole_exit(void) { } /* * No danger of targets going away from under us when dynamic * reconfigurability is off. */ static void netconsole_target_get(struct netconsole_target *nt) { } static void netconsole_target_put(struct netconsole_target *nt) { } #endif /* CONFIG_NETCONSOLE_DYNAMIC */ /* Allocate and initialize with defaults. * Note that these targets get their config_item fields zeroed-out. */ static struct netconsole_target *alloc_and_init(void) { struct netconsole_target *nt; nt = kzalloc(sizeof(*nt), GFP_KERNEL); if (!nt) return nt; if (IS_ENABLED(CONFIG_NETCONSOLE_EXTENDED_LOG)) nt->extended = true; if (IS_ENABLED(CONFIG_NETCONSOLE_PREPEND_RELEASE)) nt->release = true; nt->np.name = "netconsole"; strscpy(nt->np.dev_name, "eth0", IFNAMSIZ); nt->np.local_port = 6665; nt->np.remote_port = 6666; eth_broadcast_addr(nt->np.remote_mac); return nt; } /* Allocate new target (from boot/module param) and setup netpoll for it */ static struct netconsole_target *alloc_param_target(char *target_config) { struct netconsole_target *nt; int err; nt = alloc_and_init(); if (!nt) { err = -ENOMEM; goto fail; } if (*target_config == '+') { nt->extended = true; target_config++; } if (*target_config == 'r') { if (!nt->extended) { pr_err("Netconsole configuration error. Release feature requires extended log message"); err = -EINVAL; goto fail; } nt->release = true; target_config++; } /* Parse parameters and setup netpoll */ err = netpoll_parse_options(&nt->np, target_config); if (err) goto fail; err = netpoll_setup(&nt->np); if (err) goto fail; nt->enabled = true; return nt; fail: kfree(nt); return ERR_PTR(err); } /* Cleanup netpoll for given target (from boot/module param) and free it */ static void free_param_target(struct netconsole_target *nt) { netpoll_cleanup(&nt->np); kfree(nt); } #ifdef CONFIG_NETCONSOLE_DYNAMIC /* * Our subsystem hierarchy is: * * /sys/kernel/config/netconsole/ * | * <target>/ * | enabled * | release * | dev_name * | local_port * | remote_port * | local_ip * | remote_ip * | local_mac * | remote_mac * | * <target>/... 
*/ static struct netconsole_target *to_target(struct config_item *item) { return item ? container_of(item, struct netconsole_target, item) : NULL; } /* * Attribute operations for netconsole_target. */ static ssize_t enabled_show(struct config_item *item, char *buf) { return sysfs_emit(buf, "%d\n", to_target(item)->enabled); } static ssize_t extended_show(struct config_item *item, char *buf) { return sysfs_emit(buf, "%d\n", to_target(item)->extended); } static ssize_t release_show(struct config_item *item, char *buf) { return sysfs_emit(buf, "%d\n", to_target(item)->release); } static ssize_t dev_name_show(struct config_item *item, char *buf) { return sysfs_emit(buf, "%s\n", to_target(item)->np.dev_name); } static ssize_t local_port_show(struct config_item *item, char *buf) { return sysfs_emit(buf, "%d\n", to_target(item)->np.local_port); } static ssize_t remote_port_show(struct config_item *item, char *buf) { return sysfs_emit(buf, "%d\n", to_target(item)->np.remote_port); } static ssize_t local_ip_show(struct config_item *item, char *buf) { struct netconsole_target *nt = to_target(item); if (nt->np.ipv6) return sysfs_emit(buf, "%pI6c\n", &nt->np.local_ip.in6); else return sysfs_emit(buf, "%pI4\n", &nt->np.local_ip); } static ssize_t remote_ip_show(struct config_item *item, char *buf) { struct netconsole_target *nt = to_target(item); if (nt->np.ipv6) return sysfs_emit(buf, "%pI6c\n", &nt->np.remote_ip.in6); else return sysfs_emit(buf, "%pI4\n", &nt->np.remote_ip); } static ssize_t local_mac_show(struct config_item *item, char *buf) { struct net_device *dev = to_target(item)->np.dev; static const u8 bcast[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff }; return sysfs_emit(buf, "%pM\n", dev ? dev->dev_addr : bcast); } static ssize_t remote_mac_show(struct config_item *item, char *buf) { return sysfs_emit(buf, "%pM\n", to_target(item)->np.remote_mac); } /* * This one is special -- targets created through the configfs interface * are not enabled (and the corresponding netpoll activated) by default. * The user is expected to set the desired parameters first (which * would enable him to dynamically add new netpoll targets for new * network interfaces as and when they come up). */ static ssize_t enabled_store(struct config_item *item, const char *buf, size_t count) { struct netconsole_target *nt = to_target(item); unsigned long flags; bool enabled; int err; mutex_lock(&dynamic_netconsole_mutex); err = kstrtobool(buf, &enabled); if (err) goto out_unlock; err = -EINVAL; if ((bool)enabled == nt->enabled) { pr_info("network logging has already %s\n", nt->enabled ? "started" : "stopped"); goto out_unlock; } if (enabled) { /* true */ if (nt->release && !nt->extended) { pr_err("Not enabling netconsole. Release feature requires extended log message"); goto out_unlock; } if (nt->extended && !console_is_registered(&netconsole_ext)) register_console(&netconsole_ext); /* * Skip netpoll_parse_options() -- all the attributes are * already configured via configfs. Just print them out. 
*/ netpoll_print_options(&nt->np); err = netpoll_setup(&nt->np); if (err) goto out_unlock; pr_info("network logging started\n"); } else { /* false */ /* We need to disable the netconsole before cleaning it up * otherwise we might end up in write_msg() with * nt->np.dev == NULL and nt->enabled == true */ spin_lock_irqsave(&target_list_lock, flags); nt->enabled = false; spin_unlock_irqrestore(&target_list_lock, flags); netpoll_cleanup(&nt->np); } nt->enabled = enabled; mutex_unlock(&dynamic_netconsole_mutex); return strnlen(buf, count); out_unlock: mutex_unlock(&dynamic_netconsole_mutex); return err; } static ssize_t release_store(struct config_item *item, const char *buf, size_t count) { struct netconsole_target *nt = to_target(item); bool release; int err; mutex_lock(&dynamic_netconsole_mutex); if (nt->enabled) { pr_err("target (%s) is enabled, disable to update parameters\n", config_item_name(&nt->item)); err = -EINVAL; goto out_unlock; } err = kstrtobool(buf, &release); if (err) goto out_unlock; nt->release = release; mutex_unlock(&dynamic_netconsole_mutex); return strnlen(buf, count); out_unlock: mutex_unlock(&dynamic_netconsole_mutex); return err; } static ssize_t extended_store(struct config_item *item, const char *buf, size_t count) { struct netconsole_target *nt = to_target(item); bool extended; int err; mutex_lock(&dynamic_netconsole_mutex); if (nt->enabled) { pr_err("target (%s) is enabled, disable to update parameters\n", config_item_name(&nt->item)); err = -EINVAL; goto out_unlock; } err = kstrtobool(buf, &extended); if (err) goto out_unlock; nt->extended = extended; mutex_unlock(&dynamic_netconsole_mutex); return strnlen(buf, count); out_unlock: mutex_unlock(&dynamic_netconsole_mutex); return err; } static ssize_t dev_name_store(struct config_item *item, const char *buf, size_t count) { struct netconsole_target *nt = to_target(item); size_t len; mutex_lock(&dynamic_netconsole_mutex); if (nt->enabled) { pr_err("target (%s) is enabled, disable to update parameters\n", config_item_name(&nt->item)); mutex_unlock(&dynamic_netconsole_mutex); return -EINVAL; } strscpy(nt->np.dev_name, buf, IFNAMSIZ); /* Get rid of possible trailing newline from echo(1) */ len = strnlen(nt->np.dev_name, IFNAMSIZ); if (nt->np.dev_name[len - 1] == '\n') nt->np.dev_name[len - 1] = '\0'; mutex_unlock(&dynamic_netconsole_mutex); return strnlen(buf, count); } static ssize_t local_port_store(struct config_item *item, const char *buf, size_t count) { struct netconsole_target *nt = to_target(item); int rv = -EINVAL; mutex_lock(&dynamic_netconsole_mutex); if (nt->enabled) { pr_err("target (%s) is enabled, disable to update parameters\n", config_item_name(&nt->item)); goto out_unlock; } rv = kstrtou16(buf, 10, &nt->np.local_port); if (rv < 0) goto out_unlock; mutex_unlock(&dynamic_netconsole_mutex); return strnlen(buf, count); out_unlock: mutex_unlock(&dynamic_netconsole_mutex); return rv; } static ssize_t remote_port_store(struct config_item *item, const char *buf, size_t count) { struct netconsole_target *nt = to_target(item); int rv = -EINVAL; mutex_lock(&dynamic_netconsole_mutex); if (nt->enabled) { pr_err("target (%s) is enabled, disable to update parameters\n", config_item_name(&nt->item)); goto out_unlock; } rv = kstrtou16(buf, 10, &nt->np.remote_port); if (rv < 0) goto out_unlock; mutex_unlock(&dynamic_netconsole_mutex); return strnlen(buf, count); out_unlock: mutex_unlock(&dynamic_netconsole_mutex); return rv; } static ssize_t local_ip_store(struct config_item *item, const char *buf, size_t count) { 
struct netconsole_target *nt = to_target(item); mutex_lock(&dynamic_netconsole_mutex); if (nt->enabled) { pr_err("target (%s) is enabled, disable to update parameters\n", config_item_name(&nt->item)); goto out_unlock; } if (strnchr(buf, count, ':')) { const char *end; if (in6_pton(buf, count, nt->np.local_ip.in6.s6_addr, -1, &end) > 0) { if (*end && *end != '\n') { pr_err("invalid IPv6 address at: <%c>\n", *end); goto out_unlock; } nt->np.ipv6 = true; } else goto out_unlock; } else { if (!nt->np.ipv6) { nt->np.local_ip.ip = in_aton(buf); } else goto out_unlock; } mutex_unlock(&dynamic_netconsole_mutex); return strnlen(buf, count); out_unlock: mutex_unlock(&dynamic_netconsole_mutex); return -EINVAL; } static ssize_t remote_ip_store(struct config_item *item, const char *buf, size_t count) { struct netconsole_target *nt = to_target(item); mutex_lock(&dynamic_netconsole_mutex); if (nt->enabled) { pr_err("target (%s) is enabled, disable to update parameters\n", config_item_name(&nt->item)); goto out_unlock; } if (strnchr(buf, count, ':')) { const char *end; if (in6_pton(buf, count, nt->np.remote_ip.in6.s6_addr, -1, &end) > 0) { if (*end && *end != '\n') { pr_err("invalid IPv6 address at: <%c>\n", *end); goto out_unlock; } nt->np.ipv6 = true; } else goto out_unlock; } else { if (!nt->np.ipv6) { nt->np.remote_ip.ip = in_aton(buf); } else goto out_unlock; } mutex_unlock(&dynamic_netconsole_mutex); return strnlen(buf, count); out_unlock: mutex_unlock(&dynamic_netconsole_mutex); return -EINVAL; } static ssize_t remote_mac_store(struct config_item *item, const char *buf, size_t count) { struct netconsole_target *nt = to_target(item); u8 remote_mac[ETH_ALEN]; mutex_lock(&dynamic_netconsole_mutex); if (nt->enabled) { pr_err("target (%s) is enabled, disable to update parameters\n", config_item_name(&nt->item)); goto out_unlock; } if (!mac_pton(buf, remote_mac)) goto out_unlock; if (buf[3 * ETH_ALEN - 1] && buf[3 * ETH_ALEN - 1] != '\n') goto out_unlock; memcpy(nt->np.remote_mac, remote_mac, ETH_ALEN); mutex_unlock(&dynamic_netconsole_mutex); return strnlen(buf, count); out_unlock: mutex_unlock(&dynamic_netconsole_mutex); return -EINVAL; } CONFIGFS_ATTR(, enabled); CONFIGFS_ATTR(, extended); CONFIGFS_ATTR(, dev_name); CONFIGFS_ATTR(, local_port); CONFIGFS_ATTR(, remote_port); CONFIGFS_ATTR(, local_ip); CONFIGFS_ATTR(, remote_ip); CONFIGFS_ATTR_RO(, local_mac); CONFIGFS_ATTR(, remote_mac); CONFIGFS_ATTR(, release); static struct configfs_attribute *netconsole_target_attrs[] = { &attr_enabled, &attr_extended, &attr_release, &attr_dev_name, &attr_local_port, &attr_remote_port, &attr_local_ip, &attr_remote_ip, &attr_local_mac, &attr_remote_mac, NULL, }; /* * Item operations and type for netconsole_target. */ static void netconsole_target_release(struct config_item *item) { kfree(to_target(item)); } static struct configfs_item_operations netconsole_target_item_ops = { .release = netconsole_target_release, }; static const struct config_item_type netconsole_target_type = { .ct_attrs = netconsole_target_attrs, .ct_item_ops = &netconsole_target_item_ops, .ct_owner = THIS_MODULE, }; /* * Group operations and type for netconsole_subsys. 
*/ static struct config_item *make_netconsole_target(struct config_group *group, const char *name) { struct netconsole_target *nt; unsigned long flags; nt = alloc_and_init(); if (!nt) return ERR_PTR(-ENOMEM); /* Initialize the config_item member */ config_item_init_type_name(&nt->item, name, &netconsole_target_type); /* Adding, but it is disabled */ spin_lock_irqsave(&target_list_lock, flags); list_add(&nt->list, &target_list); spin_unlock_irqrestore(&target_list_lock, flags); return &nt->item; } static void drop_netconsole_target(struct config_group *group, struct config_item *item) { unsigned long flags; struct netconsole_target *nt = to_target(item); spin_lock_irqsave(&target_list_lock, flags); list_del(&nt->list); spin_unlock_irqrestore(&target_list_lock, flags); /* * The target may have never been enabled, or was manually disabled * before being removed so netpoll may have already been cleaned up. */ if (nt->enabled) netpoll_cleanup(&nt->np); config_item_put(&nt->item); } static struct configfs_group_operations netconsole_subsys_group_ops = { .make_item = make_netconsole_target, .drop_item = drop_netconsole_target, }; static const struct config_item_type netconsole_subsys_type = { .ct_group_ops = &netconsole_subsys_group_ops, .ct_owner = THIS_MODULE, }; /* The netconsole configfs subsystem */ static struct configfs_subsystem netconsole_subsys = { .su_group = { .cg_item = { .ci_namebuf = "netconsole", .ci_type = &netconsole_subsys_type, }, }, }; #endif /* CONFIG_NETCONSOLE_DYNAMIC */ /* Handle network interface device notifications */ static int netconsole_netdev_event(struct notifier_block *this, unsigned long event, void *ptr) { unsigned long flags; struct netconsole_target *nt; struct net_device *dev = netdev_notifier_info_to_dev(ptr); bool stopped = false; if (!(event == NETDEV_CHANGENAME || event == NETDEV_UNREGISTER || event == NETDEV_RELEASE || event == NETDEV_JOIN)) goto done; spin_lock_irqsave(&target_list_lock, flags); restart: list_for_each_entry(nt, &target_list, list) { netconsole_target_get(nt); if (nt->np.dev == dev) { switch (event) { case NETDEV_CHANGENAME: strscpy(nt->np.dev_name, dev->name, IFNAMSIZ); break; case NETDEV_RELEASE: case NETDEV_JOIN: case NETDEV_UNREGISTER: /* rtnl_lock already held * we might sleep in __netpoll_cleanup() */ spin_unlock_irqrestore(&target_list_lock, flags); __netpoll_cleanup(&nt->np); spin_lock_irqsave(&target_list_lock, flags); netdev_put(nt->np.dev, &nt->np.dev_tracker); nt->np.dev = NULL; nt->enabled = false; stopped = true; netconsole_target_put(nt); goto restart; } } netconsole_target_put(nt); } spin_unlock_irqrestore(&target_list_lock, flags); if (stopped) { const char *msg = "had an event"; switch (event) { case NETDEV_UNREGISTER: msg = "unregistered"; break; case NETDEV_RELEASE: msg = "released slaves"; break; case NETDEV_JOIN: msg = "is joining a master device"; break; } pr_info("network logging stopped on interface %s as it %s\n", dev->name, msg); } done: return NOTIFY_DONE; } static struct notifier_block netconsole_netdev_notifier = { .notifier_call = netconsole_netdev_event, }; /** * send_ext_msg_udp - send extended log message to target * @nt: target to send message to * @msg: extended log message to send * @msg_len: length of message * * Transfer extended log @msg to @nt. If @msg is longer than * MAX_PRINT_CHUNK, it'll be split and transmitted in multiple chunks with * ncfrag header field added to identify them. 
*/ static void send_ext_msg_udp(struct netconsole_target *nt, const char *msg, int msg_len) { static char buf[MAX_PRINT_CHUNK]; /* protected by target_list_lock */ const char *header, *body; int offset = 0; int header_len, body_len; const char *msg_ready = msg; const char *release; int release_len = 0; if (nt->release) { release = init_utsname()->release; release_len = strlen(release) + 1; } if (msg_len + release_len <= MAX_PRINT_CHUNK) { /* No fragmentation needed */ if (nt->release) { scnprintf(buf, MAX_PRINT_CHUNK, "%s,%s", release, msg); msg_len += release_len; msg_ready = buf; } netpoll_send_udp(&nt->np, msg_ready, msg_len); return; } /* need to insert extra header fields, detect header and body */ header = msg; body = memchr(msg, ';', msg_len); if (WARN_ON_ONCE(!body)) return; header_len = body - header; body_len = msg_len - header_len - 1; body++; /* * Transfer multiple chunks with the following extra header. * "ncfrag=<byte-offset>/<total-bytes>" */ if (nt->release) scnprintf(buf, MAX_PRINT_CHUNK, "%s,", release); memcpy(buf + release_len, header, header_len); header_len += release_len; while (offset < body_len) { int this_header = header_len; int this_chunk; this_header += scnprintf(buf + this_header, sizeof(buf) - this_header, ",ncfrag=%d/%d;", offset, body_len); this_chunk = min(body_len - offset, MAX_PRINT_CHUNK - this_header); if (WARN_ON_ONCE(this_chunk <= 0)) return; memcpy(buf + this_header, body + offset, this_chunk); netpoll_send_udp(&nt->np, buf, this_header + this_chunk); offset += this_chunk; } } static void write_ext_msg(struct console *con, const char *msg, unsigned int len) { struct netconsole_target *nt; unsigned long flags; if ((oops_only && !oops_in_progress) || list_empty(&target_list)) return; spin_lock_irqsave(&target_list_lock, flags); list_for_each_entry(nt, &target_list, list) if (nt->extended && nt->enabled && netif_running(nt->np.dev)) send_ext_msg_udp(nt, msg, len); spin_unlock_irqrestore(&target_list_lock, flags); } static void write_msg(struct console *con, const char *msg, unsigned int len) { int frag, left; unsigned long flags; struct netconsole_target *nt; const char *tmp; if (oops_only && !oops_in_progress) return; /* Avoid taking lock and disabling interrupts unnecessarily */ if (list_empty(&target_list)) return; spin_lock_irqsave(&target_list_lock, flags); list_for_each_entry(nt, &target_list, list) { if (!nt->extended && nt->enabled && netif_running(nt->np.dev)) { /* * We nest this inside the for-each-target loop above * so that we're able to get as much logging out to * at least one target if we die inside here, instead * of unnecessarily keeping all targets in lock-step. 
*/ tmp = msg; for (left = len; left;) { frag = min(left, MAX_PRINT_CHUNK); netpoll_send_udp(&nt->np, tmp, frag); tmp += frag; left -= frag; } } } spin_unlock_irqrestore(&target_list_lock, flags); } static struct console netconsole_ext = { .name = "netcon_ext", .flags = CON_ENABLED | CON_EXTENDED, .write = write_ext_msg, }; static struct console netconsole = { .name = "netcon", .flags = CON_ENABLED, .write = write_msg, }; static int __init init_netconsole(void) { int err; struct netconsole_target *nt, *tmp; bool extended = false; unsigned long flags; char *target_config; char *input = config; if (strnlen(input, MAX_PARAM_LENGTH)) { while ((target_config = strsep(&input, ";"))) { nt = alloc_param_target(target_config); if (IS_ERR(nt)) { err = PTR_ERR(nt); goto fail; } /* Dump existing printks when we register */ if (nt->extended) { extended = true; netconsole_ext.flags |= CON_PRINTBUFFER; } else { netconsole.flags |= CON_PRINTBUFFER; } spin_lock_irqsave(&target_list_lock, flags); list_add(&nt->list, &target_list); spin_unlock_irqrestore(&target_list_lock, flags); } } err = register_netdevice_notifier(&netconsole_netdev_notifier); if (err) goto fail; err = dynamic_netconsole_init(); if (err) goto undonotifier; if (extended) register_console(&netconsole_ext); register_console(&netconsole); pr_info("network logging started\n"); return err; undonotifier: unregister_netdevice_notifier(&netconsole_netdev_notifier); fail: pr_err("cleaning up\n"); /* * Remove all targets and destroy them (only targets created * from the boot/module option exist here). Skipping the list * lock is safe here, and netpoll_cleanup() will sleep. */ list_for_each_entry_safe(nt, tmp, &target_list, list) { list_del(&nt->list); free_param_target(nt); } return err; } static void __exit cleanup_netconsole(void) { struct netconsole_target *nt, *tmp; if (console_is_registered(&netconsole_ext)) unregister_console(&netconsole_ext); unregister_console(&netconsole); dynamic_netconsole_exit(); unregister_netdevice_notifier(&netconsole_netdev_notifier); /* * Targets created via configfs pin references on our module * and would first be rmdir(2)'ed from userspace. We reach * here only when they are already destroyed, and only those * created from the boot/module option are left, so remove and * destroy them. Skipping the list lock is safe here, and * netpoll_cleanup() will sleep. */ list_for_each_entry_safe(nt, tmp, &target_list, list) { list_del(&nt->list); free_param_target(nt); } } /* * Use late_initcall to ensure netconsole is * initialized after network device driver if built-in. * * late_initcall() and module_init() are identical if built as module. */ late_initcall(init_netconsole); module_exit(cleanup_netconsole);
linux-master
drivers/net/netconsole.c
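send_ext_msg_udp() above caps each datagram at MAX_PRINT_CHUNK bytes and, when a record does not fit, splits it at the first ';' into header and body, then resends the header with a ",ncfrag=<byte-offset>/<total-bytes>" field in front of each body slice. The standalone sketch below reproduces that framing so the chunk boundaries are easy to inspect; the 80-byte budget, the sample record, and the printf() "transport" are demo assumptions, not driver values (the driver uses 1000 bytes and netpoll_send_udp()).

/* Userspace re-creation of the ncfrag framing used by send_ext_msg_udp().
 * CHUNK and the printf() "transport" are demo assumptions; the kernel
 * driver uses MAX_PRINT_CHUNK (1000 bytes) and netpoll_send_udp(). */
#include <stdio.h>
#include <string.h>

#define CHUNK 80

static void send_fragmented(const char *msg)
{
	char buf[CHUNK + 1];
	const char *body = strchr(msg, ';');
	int msg_len = (int)strlen(msg);
	int header_len, body_len, offset = 0;

	if (msg_len <= CHUNK) {		/* fits in one datagram */
		printf("[%d] %s\n", msg_len, msg);
		return;
	}
	if (!body)			/* malformed record: no header/body split */
		return;

	header_len = (int)(body - msg);
	body_len = msg_len - header_len - 1;
	body++;

	while (offset < body_len) {
		int this_header, this_chunk;

		memcpy(buf, msg, header_len);
		this_header = header_len;
		this_header += snprintf(buf + this_header,
					sizeof(buf) - this_header,
					",ncfrag=%d/%d;", offset, body_len);
		this_chunk = body_len - offset;
		if (this_chunk > CHUNK - this_header)
			this_chunk = CHUNK - this_header;
		memcpy(buf + this_header, body + offset, this_chunk);
		buf[this_header + this_chunk] = '\0';
		printf("[%d] %s\n", this_header + this_chunk, buf);
		offset += this_chunk;
	}
}

int main(void)
{
	/* A fake extended record: "<level>,<seq>,<ts>,<flags>;<text>" */
	send_fragmented("6,123,456,-;a log line that is deliberately long enough "
			"to need several ncfrag chunks when the datagram budget "
			"is only eighty bytes in this demonstration");
	return 0;
}

Repeating the full record header plus the ncfrag offset/total in every chunk is what lets a receiver reassemble records even when datagrams arrive out of order or get dropped.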
/* * This file is provided under a dual BSD/GPLv2 license. When using or * redistributing this file, you may do so under either license. * * GPL LICENSE SUMMARY * * Copyright(c) 2012 Intel Corporation. All rights reserved. * Copyright (C) 2015 EMC Corporation. All Rights Reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as * published by the Free Software Foundation. * * BSD LICENSE * * Copyright(c) 2012 Intel Corporation. All rights reserved. * Copyright (C) 2015 EMC Corporation. All Rights Reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copy * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * Neither the name of Intel Corporation nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
* * PCIe NTB Network Linux driver * * Contact Information: * Jon Mason <[email protected]> */ #include <linux/etherdevice.h> #include <linux/ethtool.h> #include <linux/module.h> #include <linux/pci.h> #include <linux/ntb.h> #include <linux/ntb_transport.h> #define NTB_NETDEV_VER "0.7" MODULE_DESCRIPTION(KBUILD_MODNAME); MODULE_VERSION(NTB_NETDEV_VER); MODULE_LICENSE("Dual BSD/GPL"); MODULE_AUTHOR("Intel Corporation"); /* Time in usecs for tx resource reaper */ static unsigned int tx_time = 1; /* Number of descriptors to free before resuming tx */ static unsigned int tx_start = 10; /* Number of descriptors still available before stop upper layer tx */ static unsigned int tx_stop = 5; struct ntb_netdev { struct pci_dev *pdev; struct net_device *ndev; struct ntb_transport_qp *qp; struct timer_list tx_timer; }; #define NTB_TX_TIMEOUT_MS 1000 #define NTB_RXQ_SIZE 100 static void ntb_netdev_event_handler(void *data, int link_is_up) { struct net_device *ndev = data; struct ntb_netdev *dev = netdev_priv(ndev); netdev_dbg(ndev, "Event %x, Link %x\n", link_is_up, ntb_transport_link_query(dev->qp)); if (link_is_up) { if (ntb_transport_link_query(dev->qp)) netif_carrier_on(ndev); } else { netif_carrier_off(ndev); } } static void ntb_netdev_rx_handler(struct ntb_transport_qp *qp, void *qp_data, void *data, int len) { struct net_device *ndev = qp_data; struct sk_buff *skb; int rc; skb = data; if (!skb) return; netdev_dbg(ndev, "%s: %d byte payload received\n", __func__, len); if (len < 0) { ndev->stats.rx_errors++; ndev->stats.rx_length_errors++; goto enqueue_again; } skb_put(skb, len); skb->protocol = eth_type_trans(skb, ndev); skb->ip_summed = CHECKSUM_NONE; if (__netif_rx(skb) == NET_RX_DROP) { ndev->stats.rx_errors++; ndev->stats.rx_dropped++; } else { ndev->stats.rx_packets++; ndev->stats.rx_bytes += len; } skb = netdev_alloc_skb(ndev, ndev->mtu + ETH_HLEN); if (!skb) { ndev->stats.rx_errors++; ndev->stats.rx_frame_errors++; return; } enqueue_again: rc = ntb_transport_rx_enqueue(qp, skb, skb->data, ndev->mtu + ETH_HLEN); if (rc) { dev_kfree_skb_any(skb); ndev->stats.rx_errors++; ndev->stats.rx_fifo_errors++; } } static int __ntb_netdev_maybe_stop_tx(struct net_device *netdev, struct ntb_transport_qp *qp, int size) { struct ntb_netdev *dev = netdev_priv(netdev); netif_stop_queue(netdev); /* Make sure to see the latest value of ntb_transport_tx_free_entry() * since the queue was last started. 
*/ smp_mb(); if (likely(ntb_transport_tx_free_entry(qp) < size)) { mod_timer(&dev->tx_timer, jiffies + usecs_to_jiffies(tx_time)); return -EBUSY; } netif_start_queue(netdev); return 0; } static int ntb_netdev_maybe_stop_tx(struct net_device *ndev, struct ntb_transport_qp *qp, int size) { if (netif_queue_stopped(ndev) || (ntb_transport_tx_free_entry(qp) >= size)) return 0; return __ntb_netdev_maybe_stop_tx(ndev, qp, size); } static void ntb_netdev_tx_handler(struct ntb_transport_qp *qp, void *qp_data, void *data, int len) { struct net_device *ndev = qp_data; struct sk_buff *skb; struct ntb_netdev *dev = netdev_priv(ndev); skb = data; if (!skb || !ndev) return; if (len > 0) { ndev->stats.tx_packets++; ndev->stats.tx_bytes += skb->len; } else { ndev->stats.tx_errors++; ndev->stats.tx_aborted_errors++; } dev_kfree_skb_any(skb); if (ntb_transport_tx_free_entry(dev->qp) >= tx_start) { /* Make sure anybody stopping the queue after this sees the new * value of ntb_transport_tx_free_entry() */ smp_mb(); if (netif_queue_stopped(ndev)) netif_wake_queue(ndev); } } static netdev_tx_t ntb_netdev_start_xmit(struct sk_buff *skb, struct net_device *ndev) { struct ntb_netdev *dev = netdev_priv(ndev); int rc; ntb_netdev_maybe_stop_tx(ndev, dev->qp, tx_stop); rc = ntb_transport_tx_enqueue(dev->qp, skb, skb->data, skb->len); if (rc) goto err; /* check for next submit */ ntb_netdev_maybe_stop_tx(ndev, dev->qp, tx_stop); return NETDEV_TX_OK; err: ndev->stats.tx_dropped++; ndev->stats.tx_errors++; return NETDEV_TX_BUSY; } static void ntb_netdev_tx_timer(struct timer_list *t) { struct ntb_netdev *dev = from_timer(dev, t, tx_timer); struct net_device *ndev = dev->ndev; if (ntb_transport_tx_free_entry(dev->qp) < tx_stop) { mod_timer(&dev->tx_timer, jiffies + usecs_to_jiffies(tx_time)); } else { /* Make sure anybody stopping the queue after this sees the new * value of ntb_transport_tx_free_entry() */ smp_mb(); if (netif_queue_stopped(ndev)) netif_wake_queue(ndev); } } static int ntb_netdev_open(struct net_device *ndev) { struct ntb_netdev *dev = netdev_priv(ndev); struct sk_buff *skb; int rc, i, len; /* Add some empty rx bufs */ for (i = 0; i < NTB_RXQ_SIZE; i++) { skb = netdev_alloc_skb(ndev, ndev->mtu + ETH_HLEN); if (!skb) { rc = -ENOMEM; goto err; } rc = ntb_transport_rx_enqueue(dev->qp, skb, skb->data, ndev->mtu + ETH_HLEN); if (rc) { dev_kfree_skb(skb); goto err; } } timer_setup(&dev->tx_timer, ntb_netdev_tx_timer, 0); netif_carrier_off(ndev); ntb_transport_link_up(dev->qp); netif_start_queue(ndev); return 0; err: while ((skb = ntb_transport_rx_remove(dev->qp, &len))) dev_kfree_skb(skb); return rc; } static int ntb_netdev_close(struct net_device *ndev) { struct ntb_netdev *dev = netdev_priv(ndev); struct sk_buff *skb; int len; ntb_transport_link_down(dev->qp); while ((skb = ntb_transport_rx_remove(dev->qp, &len))) dev_kfree_skb(skb); del_timer_sync(&dev->tx_timer); return 0; } static int ntb_netdev_change_mtu(struct net_device *ndev, int new_mtu) { struct ntb_netdev *dev = netdev_priv(ndev); struct sk_buff *skb; int len, rc; if (new_mtu > ntb_transport_max_size(dev->qp) - ETH_HLEN) return -EINVAL; if (!netif_running(ndev)) { ndev->mtu = new_mtu; return 0; } /* Bring down the link and dispose of posted rx entries */ ntb_transport_link_down(dev->qp); if (ndev->mtu < new_mtu) { int i; for (i = 0; (skb = ntb_transport_rx_remove(dev->qp, &len)); i++) dev_kfree_skb(skb); for (; i; i--) { skb = netdev_alloc_skb(ndev, new_mtu + ETH_HLEN); if (!skb) { rc = -ENOMEM; goto err; } rc = ntb_transport_rx_enqueue(dev->qp, 
skb, skb->data, new_mtu + ETH_HLEN); if (rc) { dev_kfree_skb(skb); goto err; } } } ndev->mtu = new_mtu; ntb_transport_link_up(dev->qp); return 0; err: ntb_transport_link_down(dev->qp); while ((skb = ntb_transport_rx_remove(dev->qp, &len))) dev_kfree_skb(skb); netdev_err(ndev, "Error changing MTU, device inoperable\n"); return rc; } static const struct net_device_ops ntb_netdev_ops = { .ndo_open = ntb_netdev_open, .ndo_stop = ntb_netdev_close, .ndo_start_xmit = ntb_netdev_start_xmit, .ndo_change_mtu = ntb_netdev_change_mtu, .ndo_set_mac_address = eth_mac_addr, }; static void ntb_get_drvinfo(struct net_device *ndev, struct ethtool_drvinfo *info) { struct ntb_netdev *dev = netdev_priv(ndev); strscpy(info->driver, KBUILD_MODNAME, sizeof(info->driver)); strscpy(info->version, NTB_NETDEV_VER, sizeof(info->version)); strscpy(info->bus_info, pci_name(dev->pdev), sizeof(info->bus_info)); } static int ntb_get_link_ksettings(struct net_device *dev, struct ethtool_link_ksettings *cmd) { ethtool_link_ksettings_zero_link_mode(cmd, supported); ethtool_link_ksettings_add_link_mode(cmd, supported, Backplane); ethtool_link_ksettings_zero_link_mode(cmd, advertising); ethtool_link_ksettings_add_link_mode(cmd, advertising, Backplane); cmd->base.speed = SPEED_UNKNOWN; cmd->base.duplex = DUPLEX_FULL; cmd->base.port = PORT_OTHER; cmd->base.phy_address = 0; cmd->base.autoneg = AUTONEG_ENABLE; return 0; } static const struct ethtool_ops ntb_ethtool_ops = { .get_drvinfo = ntb_get_drvinfo, .get_link = ethtool_op_get_link, .get_link_ksettings = ntb_get_link_ksettings, }; static const struct ntb_queue_handlers ntb_netdev_handlers = { .tx_handler = ntb_netdev_tx_handler, .rx_handler = ntb_netdev_rx_handler, .event_handler = ntb_netdev_event_handler, }; static int ntb_netdev_probe(struct device *client_dev) { struct ntb_dev *ntb; struct net_device *ndev; struct pci_dev *pdev; struct ntb_netdev *dev; int rc; ntb = dev_ntb(client_dev->parent); pdev = ntb->pdev; if (!pdev) return -ENODEV; ndev = alloc_etherdev(sizeof(*dev)); if (!ndev) return -ENOMEM; SET_NETDEV_DEV(ndev, client_dev); dev = netdev_priv(ndev); dev->ndev = ndev; dev->pdev = pdev; ndev->features = NETIF_F_HIGHDMA; ndev->priv_flags |= IFF_LIVE_ADDR_CHANGE; ndev->hw_features = ndev->features; ndev->watchdog_timeo = msecs_to_jiffies(NTB_TX_TIMEOUT_MS); eth_random_addr(ndev->perm_addr); dev_addr_set(ndev, ndev->perm_addr); ndev->netdev_ops = &ntb_netdev_ops; ndev->ethtool_ops = &ntb_ethtool_ops; ndev->min_mtu = 0; ndev->max_mtu = ETH_MAX_MTU; dev->qp = ntb_transport_create_queue(ndev, client_dev, &ntb_netdev_handlers); if (!dev->qp) { rc = -EIO; goto err; } ndev->mtu = ntb_transport_max_size(dev->qp) - ETH_HLEN; rc = register_netdev(ndev); if (rc) goto err1; dev_set_drvdata(client_dev, ndev); dev_info(&pdev->dev, "%s created\n", ndev->name); return 0; err1: ntb_transport_free_queue(dev->qp); err: free_netdev(ndev); return rc; } static void ntb_netdev_remove(struct device *client_dev) { struct net_device *ndev = dev_get_drvdata(client_dev); struct ntb_netdev *dev = netdev_priv(ndev); unregister_netdev(ndev); ntb_transport_free_queue(dev->qp); free_netdev(ndev); } static struct ntb_transport_client ntb_netdev_client = { .driver.name = KBUILD_MODNAME, .driver.owner = THIS_MODULE, .probe = ntb_netdev_probe, .remove = ntb_netdev_remove, }; static int __init ntb_netdev_init_module(void) { int rc; rc = ntb_transport_register_client_dev(KBUILD_MODNAME); if (rc) return rc; rc = ntb_transport_register_client(&ntb_netdev_client); if (rc) { 
ntb_transport_unregister_client_dev(KBUILD_MODNAME); return rc; } return 0; } late_initcall(ntb_netdev_init_module); static void __exit ntb_netdev_exit_module(void) { ntb_transport_unregister_client(&ntb_netdev_client); ntb_transport_unregister_client_dev(KBUILD_MODNAME); } module_exit(ntb_netdev_exit_module);
linux-master
drivers/net/ntb_netdev.c
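The tx path in ntb_netdev above uses two watermarks: the queue is stopped once fewer than tx_stop transport descriptors remain and is only woken again when tx_start are free, with smp_mb() pairing the stop against the completion handler. The sketch below keeps just that hysteresis as a single-threaded userspace model; the queue depth and thresholds are invented numbers, and the barrier is omitted because the demo has no concurrency.

/* Single-threaded model of the tx_stop/tx_start hysteresis used by
 * ntb_netdev; all numbers here are demo assumptions. */
#include <stdbool.h>
#include <stdio.h>

#define QUEUE_DEPTH 16
#define TX_STOP 5	/* stop the netdev queue below this many free slots */
#define TX_START 10	/* wake it again once this many slots are free */

static int used;		/* descriptors currently in flight */
static bool queue_stopped;

static int free_entries(void) { return QUEUE_DEPTH - used; }

static void maybe_stop_tx(void)
{
	/* Echoes ntb_netdev_maybe_stop_tx(): only act when running low;
	 * the kernel version also re-checks after netif_stop_queue()
	 * under a memory barrier to close a race with completions. */
	if (!queue_stopped && free_entries() < TX_STOP) {
		queue_stopped = true;
		printf("queue stopped (%d free)\n", free_entries());
	}
}

static void tx_complete(void)
{
	used--;
	/* Echoes ntb_netdev_tx_handler(): wake only at the higher mark. */
	if (queue_stopped && free_entries() >= TX_START) {
		queue_stopped = false;
		printf("queue woken (%d free)\n", free_entries());
	}
}

static bool xmit(int pkt)
{
	if (queue_stopped)
		return false;		/* upper layer would not call us here */
	used++;
	printf("sent packet %d (%d free)\n", pkt, free_entries());
	maybe_stop_tx();
	return true;
}

int main(void)
{
	int i;

	for (i = 0; i < 20; i++)	/* fill until the queue stops */
		if (!xmit(i))
			break;
	while (used)			/* drain; wake happens at TX_START free */
		tx_complete();
	return 0;
}

The gap between the two thresholds is deliberate: waking at a higher mark than the one that stopped the queue avoids stop/wake thrashing when the number of free entries hovers around a single boundary.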
// SPDX-License-Identifier: GPL-2.0-only /* * mdio.c: Generic support for MDIO-compatible transceivers * Copyright 2006-2009 Solarflare Communications Inc. */ #include <linux/kernel.h> #include <linux/capability.h> #include <linux/errno.h> #include <linux/ethtool.h> #include <linux/mdio.h> #include <linux/module.h> MODULE_DESCRIPTION("Generic support for MDIO-compatible transceivers"); MODULE_AUTHOR("Copyright 2006-2009 Solarflare Communications Inc."); MODULE_LICENSE("GPL"); /** * mdio45_probe - probe for an MDIO (clause 45) device * @mdio: MDIO interface * @prtad: Expected PHY address * * This sets @prtad and @mmds in the MDIO interface if successful. * Returns 0 on success, negative on error. */ int mdio45_probe(struct mdio_if_info *mdio, int prtad) { int mmd, stat2, devs1, devs2; /* Assume PHY must have at least one of PMA/PMD, WIS, PCS, PHY * XS or DTE XS; give up if none is present. */ for (mmd = 1; mmd <= 5; mmd++) { /* Is this MMD present? */ stat2 = mdio->mdio_read(mdio->dev, prtad, mmd, MDIO_STAT2); if (stat2 < 0 || (stat2 & MDIO_STAT2_DEVPRST) != MDIO_STAT2_DEVPRST_VAL) continue; /* It should tell us about all the other MMDs */ devs1 = mdio->mdio_read(mdio->dev, prtad, mmd, MDIO_DEVS1); devs2 = mdio->mdio_read(mdio->dev, prtad, mmd, MDIO_DEVS2); if (devs1 < 0 || devs2 < 0) continue; mdio->prtad = prtad; mdio->mmds = devs1 | (devs2 << 16); return 0; } return -ENODEV; } EXPORT_SYMBOL(mdio45_probe); /** * mdio_set_flag - set or clear flag in an MDIO register * @mdio: MDIO interface * @prtad: PHY address * @devad: MMD address * @addr: Register address * @mask: Mask for flag (single bit set) * @sense: New value of flag * * This debounces changes: it does not write the register if the flag * already has the proper value. Returns 0 on success, negative on error. */ int mdio_set_flag(const struct mdio_if_info *mdio, int prtad, int devad, u16 addr, int mask, bool sense) { int old_val = mdio->mdio_read(mdio->dev, prtad, devad, addr); int new_val; if (old_val < 0) return old_val; if (sense) new_val = old_val | mask; else new_val = old_val & ~mask; if (old_val == new_val) return 0; return mdio->mdio_write(mdio->dev, prtad, devad, addr, new_val); } EXPORT_SYMBOL(mdio_set_flag); /** * mdio45_links_ok - is link status up/OK * @mdio: MDIO interface * @mmd_mask: Mask for MMDs to check * * Returns 1 if the PHY reports link status up/OK, 0 otherwise. * @mmd_mask is normally @mdio->mmds, but if loopback is enabled * the MMDs being bypassed should be excluded from the mask. 
*/ int mdio45_links_ok(const struct mdio_if_info *mdio, u32 mmd_mask) { int devad, reg; if (!mmd_mask) { /* Use absence of XGMII faults in lieu of link state */ reg = mdio->mdio_read(mdio->dev, mdio->prtad, MDIO_MMD_PHYXS, MDIO_STAT2); return reg >= 0 && !(reg & MDIO_STAT2_RXFAULT); } for (devad = 0; mmd_mask; devad++) { if (mmd_mask & (1 << devad)) { mmd_mask &= ~(1 << devad); /* Reset the latched status and fault flags */ mdio->mdio_read(mdio->dev, mdio->prtad, devad, MDIO_STAT1); if (devad == MDIO_MMD_PMAPMD || devad == MDIO_MMD_PCS || devad == MDIO_MMD_PHYXS || devad == MDIO_MMD_DTEXS) mdio->mdio_read(mdio->dev, mdio->prtad, devad, MDIO_STAT2); /* Check the current status and fault flags */ reg = mdio->mdio_read(mdio->dev, mdio->prtad, devad, MDIO_STAT1); if (reg < 0 || (reg & (MDIO_STAT1_FAULT | MDIO_STAT1_LSTATUS)) != MDIO_STAT1_LSTATUS) return false; } } return true; } EXPORT_SYMBOL(mdio45_links_ok); /** * mdio45_nway_restart - restart auto-negotiation for this interface * @mdio: MDIO interface * * Returns 0 on success, negative on error. */ int mdio45_nway_restart(const struct mdio_if_info *mdio) { if (!(mdio->mmds & MDIO_DEVS_AN)) return -EOPNOTSUPP; mdio_set_flag(mdio, mdio->prtad, MDIO_MMD_AN, MDIO_CTRL1, MDIO_AN_CTRL1_RESTART, true); return 0; } EXPORT_SYMBOL(mdio45_nway_restart); static u32 mdio45_get_an(const struct mdio_if_info *mdio, u16 addr) { u32 result = 0; int reg; reg = mdio->mdio_read(mdio->dev, mdio->prtad, MDIO_MMD_AN, addr); if (reg & ADVERTISE_10HALF) result |= ADVERTISED_10baseT_Half; if (reg & ADVERTISE_10FULL) result |= ADVERTISED_10baseT_Full; if (reg & ADVERTISE_100HALF) result |= ADVERTISED_100baseT_Half; if (reg & ADVERTISE_100FULL) result |= ADVERTISED_100baseT_Full; if (reg & ADVERTISE_PAUSE_CAP) result |= ADVERTISED_Pause; if (reg & ADVERTISE_PAUSE_ASYM) result |= ADVERTISED_Asym_Pause; return result; } /** * mdio45_ethtool_gset_npage - get settings for ETHTOOL_GSET * @mdio: MDIO interface * @ecmd: Ethtool request structure * @npage_adv: Modes currently advertised on next pages * @npage_lpa: Modes advertised by link partner on next pages * * The @ecmd parameter is expected to have been cleared before calling * mdio45_ethtool_gset_npage(). * * Since the CSRs for auto-negotiation using next pages are not fully * standardised, this function does not attempt to decode them. The * caller must pass them in. 
*/ void mdio45_ethtool_gset_npage(const struct mdio_if_info *mdio, struct ethtool_cmd *ecmd, u32 npage_adv, u32 npage_lpa) { int reg; u32 speed; BUILD_BUG_ON(MDIO_SUPPORTS_C22 != ETH_MDIO_SUPPORTS_C22); BUILD_BUG_ON(MDIO_SUPPORTS_C45 != ETH_MDIO_SUPPORTS_C45); ecmd->transceiver = XCVR_INTERNAL; ecmd->phy_address = mdio->prtad; ecmd->mdio_support = mdio->mode_support & (MDIO_SUPPORTS_C45 | MDIO_SUPPORTS_C22); reg = mdio->mdio_read(mdio->dev, mdio->prtad, MDIO_MMD_PMAPMD, MDIO_CTRL2); switch (reg & MDIO_PMA_CTRL2_TYPE) { case MDIO_PMA_CTRL2_10GBT: case MDIO_PMA_CTRL2_1000BT: case MDIO_PMA_CTRL2_100BTX: case MDIO_PMA_CTRL2_10BT: ecmd->port = PORT_TP; ecmd->supported = SUPPORTED_TP; reg = mdio->mdio_read(mdio->dev, mdio->prtad, MDIO_MMD_PMAPMD, MDIO_SPEED); if (reg & MDIO_SPEED_10G) ecmd->supported |= SUPPORTED_10000baseT_Full; if (reg & MDIO_PMA_SPEED_1000) ecmd->supported |= (SUPPORTED_1000baseT_Full | SUPPORTED_1000baseT_Half); if (reg & MDIO_PMA_SPEED_100) ecmd->supported |= (SUPPORTED_100baseT_Full | SUPPORTED_100baseT_Half); if (reg & MDIO_PMA_SPEED_10) ecmd->supported |= (SUPPORTED_10baseT_Full | SUPPORTED_10baseT_Half); ecmd->advertising = ADVERTISED_TP; break; case MDIO_PMA_CTRL2_10GBCX4: ecmd->port = PORT_OTHER; ecmd->supported = 0; ecmd->advertising = 0; break; case MDIO_PMA_CTRL2_10GBKX4: case MDIO_PMA_CTRL2_10GBKR: case MDIO_PMA_CTRL2_1000BKX: ecmd->port = PORT_OTHER; ecmd->supported = SUPPORTED_Backplane; reg = mdio->mdio_read(mdio->dev, mdio->prtad, MDIO_MMD_PMAPMD, MDIO_PMA_EXTABLE); if (reg & MDIO_PMA_EXTABLE_10GBKX4) ecmd->supported |= SUPPORTED_10000baseKX4_Full; if (reg & MDIO_PMA_EXTABLE_10GBKR) ecmd->supported |= SUPPORTED_10000baseKR_Full; if (reg & MDIO_PMA_EXTABLE_1000BKX) ecmd->supported |= SUPPORTED_1000baseKX_Full; reg = mdio->mdio_read(mdio->dev, mdio->prtad, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_FECABLE); if (reg & MDIO_PMA_10GBR_FECABLE_ABLE) ecmd->supported |= SUPPORTED_10000baseR_FEC; ecmd->advertising = ADVERTISED_Backplane; break; /* All the other defined modes are flavours of optical */ default: ecmd->port = PORT_FIBRE; ecmd->supported = SUPPORTED_FIBRE; ecmd->advertising = ADVERTISED_FIBRE; break; } if (mdio->mmds & MDIO_DEVS_AN) { ecmd->supported |= SUPPORTED_Autoneg; reg = mdio->mdio_read(mdio->dev, mdio->prtad, MDIO_MMD_AN, MDIO_CTRL1); if (reg & MDIO_AN_CTRL1_ENABLE) { ecmd->autoneg = AUTONEG_ENABLE; ecmd->advertising |= ADVERTISED_Autoneg | mdio45_get_an(mdio, MDIO_AN_ADVERTISE) | npage_adv; } else { ecmd->autoneg = AUTONEG_DISABLE; } } else { ecmd->autoneg = AUTONEG_DISABLE; } if (ecmd->autoneg) { u32 modes = 0; int an_stat = mdio->mdio_read(mdio->dev, mdio->prtad, MDIO_MMD_AN, MDIO_STAT1); /* If AN is complete and successful, report best common * mode, otherwise report best advertised mode. 
*/ if (an_stat & MDIO_AN_STAT1_COMPLETE) { ecmd->lp_advertising = mdio45_get_an(mdio, MDIO_AN_LPA) | npage_lpa; if (an_stat & MDIO_AN_STAT1_LPABLE) ecmd->lp_advertising |= ADVERTISED_Autoneg; modes = ecmd->advertising & ecmd->lp_advertising; } if ((modes & ~ADVERTISED_Autoneg) == 0) modes = ecmd->advertising; if (modes & (ADVERTISED_10000baseT_Full | ADVERTISED_10000baseKX4_Full | ADVERTISED_10000baseKR_Full)) { speed = SPEED_10000; ecmd->duplex = DUPLEX_FULL; } else if (modes & (ADVERTISED_1000baseT_Full | ADVERTISED_1000baseT_Half | ADVERTISED_1000baseKX_Full)) { speed = SPEED_1000; ecmd->duplex = !(modes & ADVERTISED_1000baseT_Half); } else if (modes & (ADVERTISED_100baseT_Full | ADVERTISED_100baseT_Half)) { speed = SPEED_100; ecmd->duplex = !!(modes & ADVERTISED_100baseT_Full); } else { speed = SPEED_10; ecmd->duplex = !!(modes & ADVERTISED_10baseT_Full); } } else { /* Report forced settings */ reg = mdio->mdio_read(mdio->dev, mdio->prtad, MDIO_MMD_PMAPMD, MDIO_CTRL1); speed = (((reg & MDIO_PMA_CTRL1_SPEED1000) ? 100 : 1) * ((reg & MDIO_PMA_CTRL1_SPEED100) ? 100 : 10)); ecmd->duplex = (reg & MDIO_CTRL1_FULLDPLX || speed == SPEED_10000); } ethtool_cmd_speed_set(ecmd, speed); /* 10GBASE-T MDI/MDI-X */ if (ecmd->port == PORT_TP && (ethtool_cmd_speed(ecmd) == SPEED_10000)) { switch (mdio->mdio_read(mdio->dev, mdio->prtad, MDIO_MMD_PMAPMD, MDIO_PMA_10GBT_SWAPPOL)) { case MDIO_PMA_10GBT_SWAPPOL_ABNX | MDIO_PMA_10GBT_SWAPPOL_CDNX: ecmd->eth_tp_mdix = ETH_TP_MDI; break; case 0: ecmd->eth_tp_mdix = ETH_TP_MDI_X; break; default: /* It's complicated... */ ecmd->eth_tp_mdix = ETH_TP_MDI_INVALID; break; } } } EXPORT_SYMBOL(mdio45_ethtool_gset_npage); /** * mdio45_ethtool_ksettings_get_npage - get settings for ETHTOOL_GLINKSETTINGS * @mdio: MDIO interface * @cmd: Ethtool request structure * @npage_adv: Modes currently advertised on next pages * @npage_lpa: Modes advertised by link partner on next pages * * The @cmd parameter is expected to have been cleared before calling * mdio45_ethtool_ksettings_get_npage(). * * Since the CSRs for auto-negotiation using next pages are not fully * standardised, this function does not attempt to decode them. The * caller must pass them in. 
*/ void mdio45_ethtool_ksettings_get_npage(const struct mdio_if_info *mdio, struct ethtool_link_ksettings *cmd, u32 npage_adv, u32 npage_lpa) { int reg; u32 speed, supported = 0, advertising = 0, lp_advertising = 0; BUILD_BUG_ON(MDIO_SUPPORTS_C22 != ETH_MDIO_SUPPORTS_C22); BUILD_BUG_ON(MDIO_SUPPORTS_C45 != ETH_MDIO_SUPPORTS_C45); cmd->base.phy_address = mdio->prtad; cmd->base.mdio_support = mdio->mode_support & (MDIO_SUPPORTS_C45 | MDIO_SUPPORTS_C22); reg = mdio->mdio_read(mdio->dev, mdio->prtad, MDIO_MMD_PMAPMD, MDIO_CTRL2); switch (reg & MDIO_PMA_CTRL2_TYPE) { case MDIO_PMA_CTRL2_10GBT: case MDIO_PMA_CTRL2_1000BT: case MDIO_PMA_CTRL2_100BTX: case MDIO_PMA_CTRL2_10BT: cmd->base.port = PORT_TP; supported = SUPPORTED_TP; reg = mdio->mdio_read(mdio->dev, mdio->prtad, MDIO_MMD_PMAPMD, MDIO_SPEED); if (reg & MDIO_SPEED_10G) supported |= SUPPORTED_10000baseT_Full; if (reg & MDIO_PMA_SPEED_1000) supported |= (SUPPORTED_1000baseT_Full | SUPPORTED_1000baseT_Half); if (reg & MDIO_PMA_SPEED_100) supported |= (SUPPORTED_100baseT_Full | SUPPORTED_100baseT_Half); if (reg & MDIO_PMA_SPEED_10) supported |= (SUPPORTED_10baseT_Full | SUPPORTED_10baseT_Half); advertising = ADVERTISED_TP; break; case MDIO_PMA_CTRL2_10GBCX4: cmd->base.port = PORT_OTHER; supported = 0; advertising = 0; break; case MDIO_PMA_CTRL2_10GBKX4: case MDIO_PMA_CTRL2_10GBKR: case MDIO_PMA_CTRL2_1000BKX: cmd->base.port = PORT_OTHER; supported = SUPPORTED_Backplane; reg = mdio->mdio_read(mdio->dev, mdio->prtad, MDIO_MMD_PMAPMD, MDIO_PMA_EXTABLE); if (reg & MDIO_PMA_EXTABLE_10GBKX4) supported |= SUPPORTED_10000baseKX4_Full; if (reg & MDIO_PMA_EXTABLE_10GBKR) supported |= SUPPORTED_10000baseKR_Full; if (reg & MDIO_PMA_EXTABLE_1000BKX) supported |= SUPPORTED_1000baseKX_Full; reg = mdio->mdio_read(mdio->dev, mdio->prtad, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_FECABLE); if (reg & MDIO_PMA_10GBR_FECABLE_ABLE) supported |= SUPPORTED_10000baseR_FEC; advertising = ADVERTISED_Backplane; break; /* All the other defined modes are flavours of optical */ default: cmd->base.port = PORT_FIBRE; supported = SUPPORTED_FIBRE; advertising = ADVERTISED_FIBRE; break; } if (mdio->mmds & MDIO_DEVS_AN) { supported |= SUPPORTED_Autoneg; reg = mdio->mdio_read(mdio->dev, mdio->prtad, MDIO_MMD_AN, MDIO_CTRL1); if (reg & MDIO_AN_CTRL1_ENABLE) { cmd->base.autoneg = AUTONEG_ENABLE; advertising |= ADVERTISED_Autoneg | mdio45_get_an(mdio, MDIO_AN_ADVERTISE) | npage_adv; } else { cmd->base.autoneg = AUTONEG_DISABLE; } } else { cmd->base.autoneg = AUTONEG_DISABLE; } if (cmd->base.autoneg) { u32 modes = 0; int an_stat = mdio->mdio_read(mdio->dev, mdio->prtad, MDIO_MMD_AN, MDIO_STAT1); /* If AN is complete and successful, report best common * mode, otherwise report best advertised mode. 
*/ if (an_stat & MDIO_AN_STAT1_COMPLETE) { lp_advertising = mdio45_get_an(mdio, MDIO_AN_LPA) | npage_lpa; if (an_stat & MDIO_AN_STAT1_LPABLE) lp_advertising |= ADVERTISED_Autoneg; modes = advertising & lp_advertising; } if ((modes & ~ADVERTISED_Autoneg) == 0) modes = advertising; if (modes & (ADVERTISED_10000baseT_Full | ADVERTISED_10000baseKX4_Full | ADVERTISED_10000baseKR_Full)) { speed = SPEED_10000; cmd->base.duplex = DUPLEX_FULL; } else if (modes & (ADVERTISED_1000baseT_Full | ADVERTISED_1000baseT_Half | ADVERTISED_1000baseKX_Full)) { speed = SPEED_1000; cmd->base.duplex = !(modes & ADVERTISED_1000baseT_Half); } else if (modes & (ADVERTISED_100baseT_Full | ADVERTISED_100baseT_Half)) { speed = SPEED_100; cmd->base.duplex = !!(modes & ADVERTISED_100baseT_Full); } else { speed = SPEED_10; cmd->base.duplex = !!(modes & ADVERTISED_10baseT_Full); } } else { /* Report forced settings */ reg = mdio->mdio_read(mdio->dev, mdio->prtad, MDIO_MMD_PMAPMD, MDIO_CTRL1); speed = (((reg & MDIO_PMA_CTRL1_SPEED1000) ? 100 : 1) * ((reg & MDIO_PMA_CTRL1_SPEED100) ? 100 : 10)); cmd->base.duplex = (reg & MDIO_CTRL1_FULLDPLX || speed == SPEED_10000); } cmd->base.speed = speed; ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported, supported); ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising, advertising); ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.lp_advertising, lp_advertising); /* 10GBASE-T MDI/MDI-X */ if (cmd->base.port == PORT_TP && (cmd->base.speed == SPEED_10000)) { switch (mdio->mdio_read(mdio->dev, mdio->prtad, MDIO_MMD_PMAPMD, MDIO_PMA_10GBT_SWAPPOL)) { case MDIO_PMA_10GBT_SWAPPOL_ABNX | MDIO_PMA_10GBT_SWAPPOL_CDNX: cmd->base.eth_tp_mdix = ETH_TP_MDI; break; case 0: cmd->base.eth_tp_mdix = ETH_TP_MDI_X; break; default: /* It's complicated... */ cmd->base.eth_tp_mdix = ETH_TP_MDI_INVALID; break; } } } EXPORT_SYMBOL(mdio45_ethtool_ksettings_get_npage); /** * mdio_mii_ioctl - MII ioctl interface for MDIO (clause 22 or 45) PHYs * @mdio: MDIO interface * @mii_data: MII ioctl data structure * @cmd: MII ioctl command * * Returns 0 on success, negative on error. */ int mdio_mii_ioctl(const struct mdio_if_info *mdio, struct mii_ioctl_data *mii_data, int cmd) { int prtad, devad; u16 addr = mii_data->reg_num; /* Validate/convert cmd to one of SIOC{G,S}MIIREG */ switch (cmd) { case SIOCGMIIPHY: if (mdio->prtad == MDIO_PRTAD_NONE) return -EOPNOTSUPP; mii_data->phy_id = mdio->prtad; cmd = SIOCGMIIREG; break; case SIOCGMIIREG: case SIOCSMIIREG: break; default: return -EOPNOTSUPP; } /* Validate/convert phy_id */ if ((mdio->mode_support & MDIO_SUPPORTS_C45) && mdio_phy_id_is_c45(mii_data->phy_id)) { prtad = mdio_phy_id_prtad(mii_data->phy_id); devad = mdio_phy_id_devad(mii_data->phy_id); } else if ((mdio->mode_support & MDIO_SUPPORTS_C22) && mii_data->phy_id < 0x20) { prtad = mii_data->phy_id; devad = MDIO_DEVAD_NONE; addr &= 0x1f; } else if ((mdio->mode_support & MDIO_EMULATE_C22) && mdio->prtad != MDIO_PRTAD_NONE && mii_data->phy_id == mdio->prtad) { /* Remap commonly-used MII registers. 
*/ prtad = mdio->prtad; switch (addr) { case MII_BMCR: case MII_BMSR: case MII_PHYSID1: case MII_PHYSID2: devad = __ffs(mdio->mmds); break; case MII_ADVERTISE: case MII_LPA: if (!(mdio->mmds & MDIO_DEVS_AN)) return -EINVAL; devad = MDIO_MMD_AN; if (addr == MII_ADVERTISE) addr = MDIO_AN_ADVERTISE; else addr = MDIO_AN_LPA; break; default: return -EINVAL; } } else { return -EINVAL; } if (cmd == SIOCGMIIREG) { int rc = mdio->mdio_read(mdio->dev, prtad, devad, addr); if (rc < 0) return rc; mii_data->val_out = rc; return 0; } else { return mdio->mdio_write(mdio->dev, prtad, devad, addr, mii_data->val_in); } } EXPORT_SYMBOL(mdio_mii_ioctl);
linux-master
drivers/net/mdio.c
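The mdio.c record above exports two generic clause 45 helpers. As a minimal, hypothetical sketch (the foo_* names and struct foo_nic are invented here, not taken from any in-tree driver), this is how an Ethernet driver could forward its ethtool and legacy MII ioctl entry points to them; mdio45_ethtool_ksettings_get() is the in-kernel wrapper that calls the _npage variant with zero next-page bits.

#include <linux/ethtool.h>
#include <linux/mdio.h>
#include <linux/mii.h>
#include <linux/netdevice.h>

/* Hypothetical driver private data embedding the MDIO glue */
struct foo_nic {
	struct mdio_if_info mdio;	/* prtad/mmds/mode_support and the
					 * mdio_read/mdio_write hooks are
					 * assumed to be filled in at probe
					 * time by the driver */
};

/* ethtool .get_link_ksettings: let the clause 45 helper do the work */
static int foo_get_link_ksettings(struct net_device *dev,
				  struct ethtool_link_ksettings *cmd)
{
	struct foo_nic *nic = netdev_priv(dev);

	/* No next-page advertisement words to merge in this sketch */
	mdio45_ethtool_ksettings_get(&nic->mdio, cmd);
	return 0;
}

/* .ndo_eth_ioctl: forward SIOCGMIIPHY/SIOC{G,S}MIIREG to the MDIO layer */
static int foo_eth_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct foo_nic *nic = netdev_priv(dev);

	return mdio_mii_ioctl(&nic->mdio, if_mii(ifr), cmd);
}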
// SPDX-License-Identifier: GPL-2.0-only /* * PHY drivers for the sungem ethernet driver. * * This file could be shared with other drivers. * * (c) 2002-2007, Benjamin Herrenscmidt ([email protected]) * * TODO: * - Add support for PHYs that provide an IRQ line * - Eventually moved the entire polling state machine in * there (out of the eth driver), so that it can easily be * skipped on PHYs that implement it in hardware. * - On LXT971 & BCM5201, Apple uses some chip specific regs * to read the link status. Figure out why and if it makes * sense to do the same (magic aneg ?) * - Apple has some additional power management code for some * Broadcom PHYs that they "hide" from the OpenSource version * of darwin, still need to reverse engineer that */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/types.h> #include <linux/netdevice.h> #include <linux/etherdevice.h> #include <linux/mii.h> #include <linux/ethtool.h> #include <linux/delay.h> #include <linux/of.h> #include <linux/sungem_phy.h> /* Link modes of the BCM5400 PHY */ static const int phy_BCM5400_link_table[8][3] = { { 0, 0, 0 }, /* No link */ { 0, 0, 0 }, /* 10BT Half Duplex */ { 1, 0, 0 }, /* 10BT Full Duplex */ { 0, 1, 0 }, /* 100BT Half Duplex */ { 0, 1, 0 }, /* 100BT Half Duplex */ { 1, 1, 0 }, /* 100BT Full Duplex*/ { 1, 0, 1 }, /* 1000BT */ { 1, 0, 1 }, /* 1000BT */ }; static inline int __sungem_phy_read(struct mii_phy* phy, int id, int reg) { return phy->mdio_read(phy->dev, id, reg); } static inline void __sungem_phy_write(struct mii_phy* phy, int id, int reg, int val) { phy->mdio_write(phy->dev, id, reg, val); } static inline int sungem_phy_read(struct mii_phy* phy, int reg) { return phy->mdio_read(phy->dev, phy->mii_id, reg); } static inline void sungem_phy_write(struct mii_phy* phy, int reg, int val) { phy->mdio_write(phy->dev, phy->mii_id, reg, val); } static int reset_one_mii_phy(struct mii_phy* phy, int phy_id) { u16 val; int limit = 10000; val = __sungem_phy_read(phy, phy_id, MII_BMCR); val &= ~(BMCR_ISOLATE | BMCR_PDOWN); val |= BMCR_RESET; __sungem_phy_write(phy, phy_id, MII_BMCR, val); udelay(100); while (--limit) { val = __sungem_phy_read(phy, phy_id, MII_BMCR); if ((val & BMCR_RESET) == 0) break; udelay(10); } if ((val & BMCR_ISOLATE) && limit > 0) __sungem_phy_write(phy, phy_id, MII_BMCR, val & ~BMCR_ISOLATE); return limit <= 0; } static int bcm5201_init(struct mii_phy* phy) { u16 data; data = sungem_phy_read(phy, MII_BCM5201_MULTIPHY); data &= ~MII_BCM5201_MULTIPHY_SUPERISOLATE; sungem_phy_write(phy, MII_BCM5201_MULTIPHY, data); sungem_phy_write(phy, MII_BCM5201_INTERRUPT, 0); return 0; } static int bcm5201_suspend(struct mii_phy* phy) { sungem_phy_write(phy, MII_BCM5201_INTERRUPT, 0); sungem_phy_write(phy, MII_BCM5201_MULTIPHY, MII_BCM5201_MULTIPHY_SUPERISOLATE); return 0; } static int bcm5221_init(struct mii_phy* phy) { u16 data; data = sungem_phy_read(phy, MII_BCM5221_TEST); sungem_phy_write(phy, MII_BCM5221_TEST, data | MII_BCM5221_TEST_ENABLE_SHADOWS); data = sungem_phy_read(phy, MII_BCM5221_SHDOW_AUX_STAT2); sungem_phy_write(phy, MII_BCM5221_SHDOW_AUX_STAT2, data | MII_BCM5221_SHDOW_AUX_STAT2_APD); data = sungem_phy_read(phy, MII_BCM5221_SHDOW_AUX_MODE4); sungem_phy_write(phy, MII_BCM5221_SHDOW_AUX_MODE4, data | MII_BCM5221_SHDOW_AUX_MODE4_CLKLOPWR); data = sungem_phy_read(phy, MII_BCM5221_TEST); sungem_phy_write(phy, MII_BCM5221_TEST, data & ~MII_BCM5221_TEST_ENABLE_SHADOWS); return 0; } static int bcm5221_suspend(struct mii_phy* phy) { u16 data; data = sungem_phy_read(phy, 
MII_BCM5221_TEST); sungem_phy_write(phy, MII_BCM5221_TEST, data | MII_BCM5221_TEST_ENABLE_SHADOWS); data = sungem_phy_read(phy, MII_BCM5221_SHDOW_AUX_MODE4); sungem_phy_write(phy, MII_BCM5221_SHDOW_AUX_MODE4, data | MII_BCM5221_SHDOW_AUX_MODE4_IDDQMODE); return 0; } static int bcm5241_init(struct mii_phy* phy) { u16 data; data = sungem_phy_read(phy, MII_BCM5221_TEST); sungem_phy_write(phy, MII_BCM5221_TEST, data | MII_BCM5221_TEST_ENABLE_SHADOWS); data = sungem_phy_read(phy, MII_BCM5221_SHDOW_AUX_STAT2); sungem_phy_write(phy, MII_BCM5221_SHDOW_AUX_STAT2, data | MII_BCM5221_SHDOW_AUX_STAT2_APD); data = sungem_phy_read(phy, MII_BCM5221_SHDOW_AUX_MODE4); sungem_phy_write(phy, MII_BCM5221_SHDOW_AUX_MODE4, data & ~MII_BCM5241_SHDOW_AUX_MODE4_STANDBYPWR); data = sungem_phy_read(phy, MII_BCM5221_TEST); sungem_phy_write(phy, MII_BCM5221_TEST, data & ~MII_BCM5221_TEST_ENABLE_SHADOWS); return 0; } static int bcm5241_suspend(struct mii_phy* phy) { u16 data; data = sungem_phy_read(phy, MII_BCM5221_TEST); sungem_phy_write(phy, MII_BCM5221_TEST, data | MII_BCM5221_TEST_ENABLE_SHADOWS); data = sungem_phy_read(phy, MII_BCM5221_SHDOW_AUX_MODE4); sungem_phy_write(phy, MII_BCM5221_SHDOW_AUX_MODE4, data | MII_BCM5241_SHDOW_AUX_MODE4_STANDBYPWR); return 0; } static int bcm5400_init(struct mii_phy* phy) { u16 data; /* Configure for gigabit full duplex */ data = sungem_phy_read(phy, MII_BCM5400_AUXCONTROL); data |= MII_BCM5400_AUXCONTROL_PWR10BASET; sungem_phy_write(phy, MII_BCM5400_AUXCONTROL, data); data = sungem_phy_read(phy, MII_BCM5400_GB_CONTROL); data |= MII_BCM5400_GB_CONTROL_FULLDUPLEXCAP; sungem_phy_write(phy, MII_BCM5400_GB_CONTROL, data); udelay(100); /* Reset and configure cascaded 10/100 PHY */ (void)reset_one_mii_phy(phy, 0x1f); data = __sungem_phy_read(phy, 0x1f, MII_BCM5201_MULTIPHY); data |= MII_BCM5201_MULTIPHY_SERIALMODE; __sungem_phy_write(phy, 0x1f, MII_BCM5201_MULTIPHY, data); data = sungem_phy_read(phy, MII_BCM5400_AUXCONTROL); data &= ~MII_BCM5400_AUXCONTROL_PWR10BASET; sungem_phy_write(phy, MII_BCM5400_AUXCONTROL, data); return 0; } static int bcm5400_suspend(struct mii_phy* phy) { #if 0 /* Commented out in Darwin... someone has those dawn docs ? */ sungem_phy_write(phy, MII_BMCR, BMCR_PDOWN); #endif return 0; } static int bcm5401_init(struct mii_phy* phy) { u16 data; int rev; rev = sungem_phy_read(phy, MII_PHYSID2) & 0x000f; if (rev == 0 || rev == 3) { /* Some revisions of 5401 appear to need this * initialisation sequence to disable, according * to OF, "tap power management" * * WARNING ! OF and Darwin don't agree on the * register addresses. OF seem to interpret the * register numbers below as decimal * * Note: This should (and does) match tg3_init_5401phy_dsp * in the tg3.c driver. 
-DaveM */ sungem_phy_write(phy, 0x18, 0x0c20); sungem_phy_write(phy, 0x17, 0x0012); sungem_phy_write(phy, 0x15, 0x1804); sungem_phy_write(phy, 0x17, 0x0013); sungem_phy_write(phy, 0x15, 0x1204); sungem_phy_write(phy, 0x17, 0x8006); sungem_phy_write(phy, 0x15, 0x0132); sungem_phy_write(phy, 0x17, 0x8006); sungem_phy_write(phy, 0x15, 0x0232); sungem_phy_write(phy, 0x17, 0x201f); sungem_phy_write(phy, 0x15, 0x0a20); } /* Configure for gigabit full duplex */ data = sungem_phy_read(phy, MII_BCM5400_GB_CONTROL); data |= MII_BCM5400_GB_CONTROL_FULLDUPLEXCAP; sungem_phy_write(phy, MII_BCM5400_GB_CONTROL, data); udelay(10); /* Reset and configure cascaded 10/100 PHY */ (void)reset_one_mii_phy(phy, 0x1f); data = __sungem_phy_read(phy, 0x1f, MII_BCM5201_MULTIPHY); data |= MII_BCM5201_MULTIPHY_SERIALMODE; __sungem_phy_write(phy, 0x1f, MII_BCM5201_MULTIPHY, data); return 0; } static int bcm5401_suspend(struct mii_phy* phy) { #if 0 /* Commented out in Darwin... someone has those dawn docs ? */ sungem_phy_write(phy, MII_BMCR, BMCR_PDOWN); #endif return 0; } static int bcm5411_init(struct mii_phy* phy) { u16 data; /* Here's some more Apple black magic to setup * some voltage stuffs. */ sungem_phy_write(phy, 0x1c, 0x8c23); sungem_phy_write(phy, 0x1c, 0x8ca3); sungem_phy_write(phy, 0x1c, 0x8c23); /* Here, Apple seems to want to reset it, do * it as well */ sungem_phy_write(phy, MII_BMCR, BMCR_RESET); sungem_phy_write(phy, MII_BMCR, 0x1340); data = sungem_phy_read(phy, MII_BCM5400_GB_CONTROL); data |= MII_BCM5400_GB_CONTROL_FULLDUPLEXCAP; sungem_phy_write(phy, MII_BCM5400_GB_CONTROL, data); udelay(10); /* Reset and configure cascaded 10/100 PHY */ (void)reset_one_mii_phy(phy, 0x1f); return 0; } static int genmii_setup_aneg(struct mii_phy *phy, u32 advertise) { u16 ctl, adv; phy->autoneg = 1; phy->speed = SPEED_10; phy->duplex = DUPLEX_HALF; phy->pause = 0; phy->advertising = advertise; /* Setup standard advertise */ adv = sungem_phy_read(phy, MII_ADVERTISE); adv &= ~(ADVERTISE_ALL | ADVERTISE_100BASE4); if (advertise & ADVERTISED_10baseT_Half) adv |= ADVERTISE_10HALF; if (advertise & ADVERTISED_10baseT_Full) adv |= ADVERTISE_10FULL; if (advertise & ADVERTISED_100baseT_Half) adv |= ADVERTISE_100HALF; if (advertise & ADVERTISED_100baseT_Full) adv |= ADVERTISE_100FULL; sungem_phy_write(phy, MII_ADVERTISE, adv); /* Start/Restart aneg */ ctl = sungem_phy_read(phy, MII_BMCR); ctl |= (BMCR_ANENABLE | BMCR_ANRESTART); sungem_phy_write(phy, MII_BMCR, ctl); return 0; } static int genmii_setup_forced(struct mii_phy *phy, int speed, int fd) { u16 ctl; phy->autoneg = 0; phy->speed = speed; phy->duplex = fd; phy->pause = 0; ctl = sungem_phy_read(phy, MII_BMCR); ctl &= ~(BMCR_FULLDPLX|BMCR_SPEED100|BMCR_ANENABLE); /* First reset the PHY */ sungem_phy_write(phy, MII_BMCR, ctl | BMCR_RESET); /* Select speed & duplex */ switch(speed) { case SPEED_10: break; case SPEED_100: ctl |= BMCR_SPEED100; break; case SPEED_1000: default: return -EINVAL; } if (fd == DUPLEX_FULL) ctl |= BMCR_FULLDPLX; sungem_phy_write(phy, MII_BMCR, ctl); return 0; } static int genmii_poll_link(struct mii_phy *phy) { u16 status; (void)sungem_phy_read(phy, MII_BMSR); status = sungem_phy_read(phy, MII_BMSR); if ((status & BMSR_LSTATUS) == 0) return 0; if (phy->autoneg && !(status & BMSR_ANEGCOMPLETE)) return 0; return 1; } static int genmii_read_link(struct mii_phy *phy) { u16 lpa; if (phy->autoneg) { lpa = sungem_phy_read(phy, MII_LPA); if (lpa & (LPA_10FULL | LPA_100FULL)) phy->duplex = DUPLEX_FULL; else phy->duplex = DUPLEX_HALF; if (lpa & (LPA_100FULL | 
LPA_100HALF)) phy->speed = SPEED_100; else phy->speed = SPEED_10; phy->pause = 0; } /* On non-aneg, we assume what we put in BMCR is the speed, * though magic-aneg shouldn't prevent this case from occurring */ return 0; } static int generic_suspend(struct mii_phy* phy) { sungem_phy_write(phy, MII_BMCR, BMCR_PDOWN); return 0; } static int bcm5421_init(struct mii_phy* phy) { u16 data; unsigned int id; id = (sungem_phy_read(phy, MII_PHYSID1) << 16 | sungem_phy_read(phy, MII_PHYSID2)); /* Revision 0 of 5421 needs some fixups */ if (id == 0x002060e0) { /* This is borrowed from MacOS */ sungem_phy_write(phy, 0x18, 0x1007); data = sungem_phy_read(phy, 0x18); sungem_phy_write(phy, 0x18, data | 0x0400); sungem_phy_write(phy, 0x18, 0x0007); data = sungem_phy_read(phy, 0x18); sungem_phy_write(phy, 0x18, data | 0x0800); sungem_phy_write(phy, 0x17, 0x000a); data = sungem_phy_read(phy, 0x15); sungem_phy_write(phy, 0x15, data | 0x0200); } /* Pick up some init code from OF for K2 version */ if ((id & 0xfffffff0) == 0x002062e0) { sungem_phy_write(phy, 4, 0x01e1); sungem_phy_write(phy, 9, 0x0300); } /* Check if we can enable automatic low power */ #ifdef CONFIG_PPC_PMAC if (phy->platform_data) { struct device_node *np = of_get_parent(phy->platform_data); int can_low_power = 1; if (np == NULL || of_get_property(np, "no-autolowpower", NULL)) can_low_power = 0; of_node_put(np); if (can_low_power) { /* Enable automatic low-power */ sungem_phy_write(phy, 0x1c, 0x9002); sungem_phy_write(phy, 0x1c, 0xa821); sungem_phy_write(phy, 0x1c, 0x941d); } } #endif /* CONFIG_PPC_PMAC */ return 0; } static int bcm54xx_setup_aneg(struct mii_phy *phy, u32 advertise) { u16 ctl, adv; phy->autoneg = 1; phy->speed = SPEED_10; phy->duplex = DUPLEX_HALF; phy->pause = 0; phy->advertising = advertise; /* Setup standard advertise */ adv = sungem_phy_read(phy, MII_ADVERTISE); adv &= ~(ADVERTISE_ALL | ADVERTISE_100BASE4); if (advertise & ADVERTISED_10baseT_Half) adv |= ADVERTISE_10HALF; if (advertise & ADVERTISED_10baseT_Full) adv |= ADVERTISE_10FULL; if (advertise & ADVERTISED_100baseT_Half) adv |= ADVERTISE_100HALF; if (advertise & ADVERTISED_100baseT_Full) adv |= ADVERTISE_100FULL; if (advertise & ADVERTISED_Pause) adv |= ADVERTISE_PAUSE_CAP; if (advertise & ADVERTISED_Asym_Pause) adv |= ADVERTISE_PAUSE_ASYM; sungem_phy_write(phy, MII_ADVERTISE, adv); /* Setup 1000BT advertise */ adv = sungem_phy_read(phy, MII_1000BASETCONTROL); adv &= ~(MII_1000BASETCONTROL_FULLDUPLEXCAP|MII_1000BASETCONTROL_HALFDUPLEXCAP); if (advertise & SUPPORTED_1000baseT_Half) adv |= MII_1000BASETCONTROL_HALFDUPLEXCAP; if (advertise & SUPPORTED_1000baseT_Full) adv |= MII_1000BASETCONTROL_FULLDUPLEXCAP; sungem_phy_write(phy, MII_1000BASETCONTROL, adv); /* Start/Restart aneg */ ctl = sungem_phy_read(phy, MII_BMCR); ctl |= (BMCR_ANENABLE | BMCR_ANRESTART); sungem_phy_write(phy, MII_BMCR, ctl); return 0; } static int bcm54xx_setup_forced(struct mii_phy *phy, int speed, int fd) { u16 ctl; phy->autoneg = 0; phy->speed = speed; phy->duplex = fd; phy->pause = 0; ctl = sungem_phy_read(phy, MII_BMCR); ctl &= ~(BMCR_FULLDPLX|BMCR_SPEED100|BMCR_SPD2|BMCR_ANENABLE); /* First reset the PHY */ sungem_phy_write(phy, MII_BMCR, ctl | BMCR_RESET); /* Select speed & duplex */ switch(speed) { case SPEED_10: break; case SPEED_100: ctl |= BMCR_SPEED100; break; case SPEED_1000: ctl |= BMCR_SPD2; } if (fd == DUPLEX_FULL) ctl |= BMCR_FULLDPLX; // XXX Should we set the sungem to GII now on 1000BT ? 
sungem_phy_write(phy, MII_BMCR, ctl); return 0; } static int bcm54xx_read_link(struct mii_phy *phy) { int link_mode; u16 val; if (phy->autoneg) { val = sungem_phy_read(phy, MII_BCM5400_AUXSTATUS); link_mode = ((val & MII_BCM5400_AUXSTATUS_LINKMODE_MASK) >> MII_BCM5400_AUXSTATUS_LINKMODE_SHIFT); phy->duplex = phy_BCM5400_link_table[link_mode][0] ? DUPLEX_FULL : DUPLEX_HALF; phy->speed = phy_BCM5400_link_table[link_mode][2] ? SPEED_1000 : (phy_BCM5400_link_table[link_mode][1] ? SPEED_100 : SPEED_10); val = sungem_phy_read(phy, MII_LPA); phy->pause = (phy->duplex == DUPLEX_FULL) && ((val & LPA_PAUSE) != 0); } /* On non-aneg, we assume what we put in BMCR is the speed, * though magic-aneg shouldn't prevent this case from occurring */ return 0; } static int marvell88e1111_init(struct mii_phy* phy) { u16 rev; /* magic init sequence for rev 0 */ rev = sungem_phy_read(phy, MII_PHYSID2) & 0x000f; if (rev == 0) { sungem_phy_write(phy, 0x1d, 0x000a); sungem_phy_write(phy, 0x1e, 0x0821); sungem_phy_write(phy, 0x1d, 0x0006); sungem_phy_write(phy, 0x1e, 0x8600); sungem_phy_write(phy, 0x1d, 0x000b); sungem_phy_write(phy, 0x1e, 0x0100); sungem_phy_write(phy, 0x1d, 0x0004); sungem_phy_write(phy, 0x1e, 0x4850); } return 0; } #define BCM5421_MODE_MASK (1 << 5) static int bcm5421_poll_link(struct mii_phy* phy) { u32 phy_reg; int mode; /* find out in what mode we are */ sungem_phy_write(phy, MII_NCONFIG, 0x1000); phy_reg = sungem_phy_read(phy, MII_NCONFIG); mode = (phy_reg & BCM5421_MODE_MASK) >> 5; if ( mode == BCM54XX_COPPER) return genmii_poll_link(phy); /* try to find out whether we have a link */ sungem_phy_write(phy, MII_NCONFIG, 0x2000); phy_reg = sungem_phy_read(phy, MII_NCONFIG); if (phy_reg & 0x0020) return 0; else return 1; } static int bcm5421_read_link(struct mii_phy* phy) { u32 phy_reg; int mode; /* find out in what mode we are */ sungem_phy_write(phy, MII_NCONFIG, 0x1000); phy_reg = sungem_phy_read(phy, MII_NCONFIG); mode = (phy_reg & BCM5421_MODE_MASK ) >> 5; if ( mode == BCM54XX_COPPER) return bcm54xx_read_link(phy); phy->speed = SPEED_1000; /* find out whether we are running half- or full duplex */ sungem_phy_write(phy, MII_NCONFIG, 0x2000); phy_reg = sungem_phy_read(phy, MII_NCONFIG); if ( (phy_reg & 0x0080) >> 7) phy->duplex |= DUPLEX_HALF; else phy->duplex |= DUPLEX_FULL; return 0; } static int bcm5421_enable_fiber(struct mii_phy* phy, int autoneg) { /* enable fiber mode */ sungem_phy_write(phy, MII_NCONFIG, 0x9020); /* LEDs active in both modes, autosense prio = fiber */ sungem_phy_write(phy, MII_NCONFIG, 0x945f); if (!autoneg) { /* switch off fibre autoneg */ sungem_phy_write(phy, MII_NCONFIG, 0xfc01); sungem_phy_write(phy, 0x0b, 0x0004); } phy->autoneg = autoneg; return 0; } #define BCM5461_FIBER_LINK (1 << 2) #define BCM5461_MODE_MASK (3 << 1) static int bcm5461_poll_link(struct mii_phy* phy) { u32 phy_reg; int mode; /* find out in what mode we are */ sungem_phy_write(phy, MII_NCONFIG, 0x7c00); phy_reg = sungem_phy_read(phy, MII_NCONFIG); mode = (phy_reg & BCM5461_MODE_MASK ) >> 1; if ( mode == BCM54XX_COPPER) return genmii_poll_link(phy); /* find out whether we have a link */ sungem_phy_write(phy, MII_NCONFIG, 0x7000); phy_reg = sungem_phy_read(phy, MII_NCONFIG); if (phy_reg & BCM5461_FIBER_LINK) return 1; else return 0; } #define BCM5461_FIBER_DUPLEX (1 << 3) static int bcm5461_read_link(struct mii_phy* phy) { u32 phy_reg; int mode; /* find out in what mode we are */ sungem_phy_write(phy, MII_NCONFIG, 0x7c00); phy_reg = sungem_phy_read(phy, MII_NCONFIG); mode = (phy_reg & 
BCM5461_MODE_MASK ) >> 1; if ( mode == BCM54XX_COPPER) { return bcm54xx_read_link(phy); } phy->speed = SPEED_1000; /* find out whether we are running half- or full duplex */ sungem_phy_write(phy, MII_NCONFIG, 0x7000); phy_reg = sungem_phy_read(phy, MII_NCONFIG); if (phy_reg & BCM5461_FIBER_DUPLEX) phy->duplex |= DUPLEX_FULL; else phy->duplex |= DUPLEX_HALF; return 0; } static int bcm5461_enable_fiber(struct mii_phy* phy, int autoneg) { /* select fiber mode, enable 1000 base-X registers */ sungem_phy_write(phy, MII_NCONFIG, 0xfc0b); if (autoneg) { /* enable fiber with no autonegotiation */ sungem_phy_write(phy, MII_ADVERTISE, 0x01e0); sungem_phy_write(phy, MII_BMCR, 0x1140); } else { /* enable fiber with autonegotiation */ sungem_phy_write(phy, MII_BMCR, 0x0140); } phy->autoneg = autoneg; return 0; } static int marvell_setup_aneg(struct mii_phy *phy, u32 advertise) { u16 ctl, adv; phy->autoneg = 1; phy->speed = SPEED_10; phy->duplex = DUPLEX_HALF; phy->pause = 0; phy->advertising = advertise; /* Setup standard advertise */ adv = sungem_phy_read(phy, MII_ADVERTISE); adv &= ~(ADVERTISE_ALL | ADVERTISE_100BASE4); if (advertise & ADVERTISED_10baseT_Half) adv |= ADVERTISE_10HALF; if (advertise & ADVERTISED_10baseT_Full) adv |= ADVERTISE_10FULL; if (advertise & ADVERTISED_100baseT_Half) adv |= ADVERTISE_100HALF; if (advertise & ADVERTISED_100baseT_Full) adv |= ADVERTISE_100FULL; if (advertise & ADVERTISED_Pause) adv |= ADVERTISE_PAUSE_CAP; if (advertise & ADVERTISED_Asym_Pause) adv |= ADVERTISE_PAUSE_ASYM; sungem_phy_write(phy, MII_ADVERTISE, adv); /* Setup 1000BT advertise & enable crossover detect * XXX How do we advertise 1000BT ? Darwin source is * confusing here, they read from specific control and * write to control... Someone has specs for those * beasts ? */ adv = sungem_phy_read(phy, MII_M1011_PHY_SPEC_CONTROL); adv |= MII_M1011_PHY_SPEC_CONTROL_AUTO_MDIX; adv &= ~(MII_1000BASETCONTROL_FULLDUPLEXCAP | MII_1000BASETCONTROL_HALFDUPLEXCAP); if (advertise & SUPPORTED_1000baseT_Half) adv |= MII_1000BASETCONTROL_HALFDUPLEXCAP; if (advertise & SUPPORTED_1000baseT_Full) adv |= MII_1000BASETCONTROL_FULLDUPLEXCAP; sungem_phy_write(phy, MII_1000BASETCONTROL, adv); /* Start/Restart aneg */ ctl = sungem_phy_read(phy, MII_BMCR); ctl |= (BMCR_ANENABLE | BMCR_ANRESTART); sungem_phy_write(phy, MII_BMCR, ctl); return 0; } static int marvell_setup_forced(struct mii_phy *phy, int speed, int fd) { u16 ctl, ctl2; phy->autoneg = 0; phy->speed = speed; phy->duplex = fd; phy->pause = 0; ctl = sungem_phy_read(phy, MII_BMCR); ctl &= ~(BMCR_FULLDPLX|BMCR_SPEED100|BMCR_SPD2|BMCR_ANENABLE); ctl |= BMCR_RESET; /* Select speed & duplex */ switch(speed) { case SPEED_10: break; case SPEED_100: ctl |= BMCR_SPEED100; break; /* I'm not sure about the one below, again, Darwin source is * quite confusing and I lack chip specs */ case SPEED_1000: ctl |= BMCR_SPD2; } if (fd == DUPLEX_FULL) ctl |= BMCR_FULLDPLX; /* Disable crossover. Again, the way Apple does it is strange, * though I don't assume they are wrong ;) */ ctl2 = sungem_phy_read(phy, MII_M1011_PHY_SPEC_CONTROL); ctl2 &= ~(MII_M1011_PHY_SPEC_CONTROL_MANUAL_MDIX | MII_M1011_PHY_SPEC_CONTROL_AUTO_MDIX | MII_1000BASETCONTROL_FULLDUPLEXCAP | MII_1000BASETCONTROL_HALFDUPLEXCAP); if (speed == SPEED_1000) ctl2 |= (fd == DUPLEX_FULL) ? MII_1000BASETCONTROL_FULLDUPLEXCAP : MII_1000BASETCONTROL_HALFDUPLEXCAP; sungem_phy_write(phy, MII_1000BASETCONTROL, ctl2); // XXX Should we set the sungem to GII now on 1000BT ? 
sungem_phy_write(phy, MII_BMCR, ctl); return 0; } static int marvell_read_link(struct mii_phy *phy) { u16 status, pmask; if (phy->autoneg) { status = sungem_phy_read(phy, MII_M1011_PHY_SPEC_STATUS); if ((status & MII_M1011_PHY_SPEC_STATUS_RESOLVED) == 0) return -EAGAIN; if (status & MII_M1011_PHY_SPEC_STATUS_1000) phy->speed = SPEED_1000; else if (status & MII_M1011_PHY_SPEC_STATUS_100) phy->speed = SPEED_100; else phy->speed = SPEED_10; if (status & MII_M1011_PHY_SPEC_STATUS_FULLDUPLEX) phy->duplex = DUPLEX_FULL; else phy->duplex = DUPLEX_HALF; pmask = MII_M1011_PHY_SPEC_STATUS_TX_PAUSE | MII_M1011_PHY_SPEC_STATUS_RX_PAUSE; phy->pause = (status & pmask) == pmask; } /* On non-aneg, we assume what we put in BMCR is the speed, * though magic-aneg shouldn't prevent this case from occurring */ return 0; } #define MII_BASIC_FEATURES \ (SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full | \ SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full | \ SUPPORTED_Autoneg | SUPPORTED_TP | SUPPORTED_MII | \ SUPPORTED_Pause) /* On gigabit capable PHYs, we advertise Pause support but not asym pause * support for now as I'm not sure it's supported and Darwin doesn't do * it neither. --BenH. */ #define MII_GBIT_FEATURES \ (MII_BASIC_FEATURES | \ SUPPORTED_1000baseT_Half | SUPPORTED_1000baseT_Full) /* Broadcom BCM 5201 */ static const struct mii_phy_ops bcm5201_phy_ops = { .init = bcm5201_init, .suspend = bcm5201_suspend, .setup_aneg = genmii_setup_aneg, .setup_forced = genmii_setup_forced, .poll_link = genmii_poll_link, .read_link = genmii_read_link, }; static struct mii_phy_def bcm5201_phy_def = { .phy_id = 0x00406210, .phy_id_mask = 0xfffffff0, .name = "BCM5201", .features = MII_BASIC_FEATURES, .magic_aneg = 1, .ops = &bcm5201_phy_ops }; /* Broadcom BCM 5221 */ static const struct mii_phy_ops bcm5221_phy_ops = { .suspend = bcm5221_suspend, .init = bcm5221_init, .setup_aneg = genmii_setup_aneg, .setup_forced = genmii_setup_forced, .poll_link = genmii_poll_link, .read_link = genmii_read_link, }; static struct mii_phy_def bcm5221_phy_def = { .phy_id = 0x004061e0, .phy_id_mask = 0xfffffff0, .name = "BCM5221", .features = MII_BASIC_FEATURES, .magic_aneg = 1, .ops = &bcm5221_phy_ops }; /* Broadcom BCM 5241 */ static const struct mii_phy_ops bcm5241_phy_ops = { .suspend = bcm5241_suspend, .init = bcm5241_init, .setup_aneg = genmii_setup_aneg, .setup_forced = genmii_setup_forced, .poll_link = genmii_poll_link, .read_link = genmii_read_link, }; static struct mii_phy_def bcm5241_phy_def = { .phy_id = 0x0143bc30, .phy_id_mask = 0xfffffff0, .name = "BCM5241", .features = MII_BASIC_FEATURES, .magic_aneg = 1, .ops = &bcm5241_phy_ops }; /* Broadcom BCM 5400 */ static const struct mii_phy_ops bcm5400_phy_ops = { .init = bcm5400_init, .suspend = bcm5400_suspend, .setup_aneg = bcm54xx_setup_aneg, .setup_forced = bcm54xx_setup_forced, .poll_link = genmii_poll_link, .read_link = bcm54xx_read_link, }; static struct mii_phy_def bcm5400_phy_def = { .phy_id = 0x00206040, .phy_id_mask = 0xfffffff0, .name = "BCM5400", .features = MII_GBIT_FEATURES, .magic_aneg = 1, .ops = &bcm5400_phy_ops }; /* Broadcom BCM 5401 */ static const struct mii_phy_ops bcm5401_phy_ops = { .init = bcm5401_init, .suspend = bcm5401_suspend, .setup_aneg = bcm54xx_setup_aneg, .setup_forced = bcm54xx_setup_forced, .poll_link = genmii_poll_link, .read_link = bcm54xx_read_link, }; static struct mii_phy_def bcm5401_phy_def = { .phy_id = 0x00206050, .phy_id_mask = 0xfffffff0, .name = "BCM5401", .features = MII_GBIT_FEATURES, .magic_aneg = 1, .ops = &bcm5401_phy_ops }; 
/* Broadcom BCM 5411 */ static const struct mii_phy_ops bcm5411_phy_ops = { .init = bcm5411_init, .suspend = generic_suspend, .setup_aneg = bcm54xx_setup_aneg, .setup_forced = bcm54xx_setup_forced, .poll_link = genmii_poll_link, .read_link = bcm54xx_read_link, }; static struct mii_phy_def bcm5411_phy_def = { .phy_id = 0x00206070, .phy_id_mask = 0xfffffff0, .name = "BCM5411", .features = MII_GBIT_FEATURES, .magic_aneg = 1, .ops = &bcm5411_phy_ops }; /* Broadcom BCM 5421 */ static const struct mii_phy_ops bcm5421_phy_ops = { .init = bcm5421_init, .suspend = generic_suspend, .setup_aneg = bcm54xx_setup_aneg, .setup_forced = bcm54xx_setup_forced, .poll_link = bcm5421_poll_link, .read_link = bcm5421_read_link, .enable_fiber = bcm5421_enable_fiber, }; static struct mii_phy_def bcm5421_phy_def = { .phy_id = 0x002060e0, .phy_id_mask = 0xfffffff0, .name = "BCM5421", .features = MII_GBIT_FEATURES, .magic_aneg = 1, .ops = &bcm5421_phy_ops }; /* Broadcom BCM 5421 built-in K2 */ static const struct mii_phy_ops bcm5421k2_phy_ops = { .init = bcm5421_init, .suspend = generic_suspend, .setup_aneg = bcm54xx_setup_aneg, .setup_forced = bcm54xx_setup_forced, .poll_link = genmii_poll_link, .read_link = bcm54xx_read_link, }; static struct mii_phy_def bcm5421k2_phy_def = { .phy_id = 0x002062e0, .phy_id_mask = 0xfffffff0, .name = "BCM5421-K2", .features = MII_GBIT_FEATURES, .magic_aneg = 1, .ops = &bcm5421k2_phy_ops }; static const struct mii_phy_ops bcm5461_phy_ops = { .init = bcm5421_init, .suspend = generic_suspend, .setup_aneg = bcm54xx_setup_aneg, .setup_forced = bcm54xx_setup_forced, .poll_link = bcm5461_poll_link, .read_link = bcm5461_read_link, .enable_fiber = bcm5461_enable_fiber, }; static struct mii_phy_def bcm5461_phy_def = { .phy_id = 0x002060c0, .phy_id_mask = 0xfffffff0, .name = "BCM5461", .features = MII_GBIT_FEATURES, .magic_aneg = 1, .ops = &bcm5461_phy_ops }; /* Broadcom BCM 5462 built-in Vesta */ static const struct mii_phy_ops bcm5462V_phy_ops = { .init = bcm5421_init, .suspend = generic_suspend, .setup_aneg = bcm54xx_setup_aneg, .setup_forced = bcm54xx_setup_forced, .poll_link = genmii_poll_link, .read_link = bcm54xx_read_link, }; static struct mii_phy_def bcm5462V_phy_def = { .phy_id = 0x002060d0, .phy_id_mask = 0xfffffff0, .name = "BCM5462-Vesta", .features = MII_GBIT_FEATURES, .magic_aneg = 1, .ops = &bcm5462V_phy_ops }; /* Marvell 88E1101 amd 88E1111 */ static const struct mii_phy_ops marvell88e1101_phy_ops = { .suspend = generic_suspend, .setup_aneg = marvell_setup_aneg, .setup_forced = marvell_setup_forced, .poll_link = genmii_poll_link, .read_link = marvell_read_link }; static const struct mii_phy_ops marvell88e1111_phy_ops = { .init = marvell88e1111_init, .suspend = generic_suspend, .setup_aneg = marvell_setup_aneg, .setup_forced = marvell_setup_forced, .poll_link = genmii_poll_link, .read_link = marvell_read_link }; /* two revs in darwin for the 88e1101 ... I could use a datasheet * to get the proper names... 
*/ static struct mii_phy_def marvell88e1101v1_phy_def = { .phy_id = 0x01410c20, .phy_id_mask = 0xfffffff0, .name = "Marvell 88E1101v1", .features = MII_GBIT_FEATURES, .magic_aneg = 1, .ops = &marvell88e1101_phy_ops }; static struct mii_phy_def marvell88e1101v2_phy_def = { .phy_id = 0x01410c60, .phy_id_mask = 0xfffffff0, .name = "Marvell 88E1101v2", .features = MII_GBIT_FEATURES, .magic_aneg = 1, .ops = &marvell88e1101_phy_ops }; static struct mii_phy_def marvell88e1111_phy_def = { .phy_id = 0x01410cc0, .phy_id_mask = 0xfffffff0, .name = "Marvell 88E1111", .features = MII_GBIT_FEATURES, .magic_aneg = 1, .ops = &marvell88e1111_phy_ops }; /* Generic implementation for most 10/100 PHYs */ static const struct mii_phy_ops generic_phy_ops = { .setup_aneg = genmii_setup_aneg, .setup_forced = genmii_setup_forced, .poll_link = genmii_poll_link, .read_link = genmii_read_link }; static struct mii_phy_def genmii_phy_def = { .phy_id = 0x00000000, .phy_id_mask = 0x00000000, .name = "Generic MII", .features = MII_BASIC_FEATURES, .magic_aneg = 0, .ops = &generic_phy_ops }; static struct mii_phy_def* mii_phy_table[] = { &bcm5201_phy_def, &bcm5221_phy_def, &bcm5241_phy_def, &bcm5400_phy_def, &bcm5401_phy_def, &bcm5411_phy_def, &bcm5421_phy_def, &bcm5421k2_phy_def, &bcm5461_phy_def, &bcm5462V_phy_def, &marvell88e1101v1_phy_def, &marvell88e1101v2_phy_def, &marvell88e1111_phy_def, &genmii_phy_def, NULL }; int sungem_phy_probe(struct mii_phy *phy, int mii_id) { int rc; u32 id; struct mii_phy_def* def; int i; /* We do not reset the mii_phy structure as the driver * may re-probe the PHY regulary */ phy->mii_id = mii_id; /* Take PHY out of isloate mode and reset it. */ rc = reset_one_mii_phy(phy, mii_id); if (rc) goto fail; /* Read ID and find matching entry */ id = (sungem_phy_read(phy, MII_PHYSID1) << 16 | sungem_phy_read(phy, MII_PHYSID2)); printk(KERN_DEBUG KBUILD_MODNAME ": " "PHY ID: %x, addr: %x\n", id, mii_id); for (i=0; (def = mii_phy_table[i]) != NULL; i++) if ((id & def->phy_id_mask) == def->phy_id) break; /* Should never be NULL (we have a generic entry), but... */ if (def == NULL) goto fail; phy->def = def; return 0; fail: phy->speed = 0; phy->duplex = 0; phy->pause = 0; phy->advertising = 0; return -ENODEV; } EXPORT_SYMBOL(sungem_phy_probe); MODULE_LICENSE("GPL");
linux-master
drivers/net/sungem_phy.c
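Before the next record, a short usage sketch of the sungem_phy library above, under stated assumptions: the foo_* identifiers are invented for illustration, while sungem_phy_probe() and the struct mii_phy / mii_phy_def / mii_phy_ops members used here come from <linux/sungem_phy.h>. A MAC driver supplies its MDIO accessors, lets the probe routine bind the matching ops table, then drives the PHY through that table.

#include <linux/netdevice.h>
#include <linux/sungem_phy.h>

static int foo_bringup_phy(struct net_device *dev, struct mii_phy *phy,
			   int mii_id,
			   int (*rd)(struct net_device *, int, int),
			   void (*wr)(struct net_device *, int, int, int))
{
	int ret;

	/* Host MAC provides the raw MDIO bus accessors */
	phy->dev = dev;
	phy->mdio_read = rd;
	phy->mdio_write = wr;

	/* Reset the PHY, read its ID and bind the matching mii_phy_def */
	ret = sungem_phy_probe(phy, mii_id);
	if (ret)
		return ret;

	/* Run the chip-specific init hook when the table provides one */
	if (phy->def->ops->init)
		phy->def->ops->init(phy);

	/* Advertise what the PHY supports and (re)start autonegotiation;
	 * the driver would later use phy->def->ops->poll_link() and
	 * read_link() to pick up speed/duplex/pause. */
	return phy->def->ops->setup_aneg(phy, phy->def->features);
}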
// SPDX-License-Identifier: GPL-2.0-or-later /* * TUN - Universal TUN/TAP device driver. * Copyright (C) 1999-2002 Maxim Krasnyansky <[email protected]> * * $Id: tun.c,v 1.15 2002/03/01 02:44:24 maxk Exp $ */ /* * Changes: * * Mike Kershaw <[email protected]> 2005/08/14 * Add TUNSETLINK ioctl to set the link encapsulation * * Mark Smith <[email protected]> * Use eth_random_addr() for tap MAC address. * * Harald Roelle <[email protected]> 2004/04/20 * Fixes in packet dropping, queue length setting and queue wakeup. * Increased default tx queue length. * Added ethtool API. * Minor cleanups * * Daniel Podlejski <[email protected]> * Modifications for 2.3.99-pre5 kernel. */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #define DRV_NAME "tun" #define DRV_VERSION "1.6" #define DRV_DESCRIPTION "Universal TUN/TAP device driver" #define DRV_COPYRIGHT "(C) 1999-2004 Max Krasnyansky <[email protected]>" #include <linux/module.h> #include <linux/errno.h> #include <linux/kernel.h> #include <linux/sched/signal.h> #include <linux/major.h> #include <linux/slab.h> #include <linux/poll.h> #include <linux/fcntl.h> #include <linux/init.h> #include <linux/skbuff.h> #include <linux/netdevice.h> #include <linux/etherdevice.h> #include <linux/miscdevice.h> #include <linux/ethtool.h> #include <linux/rtnetlink.h> #include <linux/compat.h> #include <linux/if.h> #include <linux/if_arp.h> #include <linux/if_ether.h> #include <linux/if_tun.h> #include <linux/if_vlan.h> #include <linux/crc32.h> #include <linux/nsproxy.h> #include <linux/virtio_net.h> #include <linux/rcupdate.h> #include <net/net_namespace.h> #include <net/netns/generic.h> #include <net/rtnetlink.h> #include <net/sock.h> #include <net/xdp.h> #include <net/ip_tunnels.h> #include <linux/seq_file.h> #include <linux/uio.h> #include <linux/skb_array.h> #include <linux/bpf.h> #include <linux/bpf_trace.h> #include <linux/mutex.h> #include <linux/ieee802154.h> #include <linux/if_ltalk.h> #include <uapi/linux/if_fddi.h> #include <uapi/linux/if_hippi.h> #include <uapi/linux/if_fc.h> #include <net/ax25.h> #include <net/rose.h> #include <net/6lowpan.h> #include <linux/uaccess.h> #include <linux/proc_fs.h> static void tun_default_link_ksettings(struct net_device *dev, struct ethtool_link_ksettings *cmd); #define TUN_RX_PAD (NET_IP_ALIGN + NET_SKB_PAD) /* TUN device flags */ /* IFF_ATTACH_QUEUE is never stored in device flags, * overload it to mean fasync when stored there. */ #define TUN_FASYNC IFF_ATTACH_QUEUE /* High bits in flags field are unused. */ #define TUN_VNET_LE 0x80000000 #define TUN_VNET_BE 0x40000000 #define TUN_FEATURES (IFF_NO_PI | IFF_ONE_QUEUE | IFF_VNET_HDR | \ IFF_MULTI_QUEUE | IFF_NAPI | IFF_NAPI_FRAGS) #define GOODCOPY_LEN 128 #define FLT_EXACT_COUNT 8 struct tap_filter { unsigned int count; /* Number of addrs. Zero means disabled */ u32 mask[2]; /* Mask of the hashed addrs */ unsigned char addr[FLT_EXACT_COUNT][ETH_ALEN]; }; /* MAX_TAP_QUEUES 256 is chosen to allow rx/tx queues to be equal * to max number of VCPUs in guest. */ #define MAX_TAP_QUEUES 256 #define MAX_TAP_FLOWS 4096 #define TUN_FLOW_EXPIRE (3 * HZ) /* A tun_file connects an open character device to a tuntap netdevice. It * also contains all socket related structures (except sock_fprog and tap_filter) * to serve as one transmit queue for tuntap device. The sock_fprog and * tap_filter were kept in tun_struct since they were used for filtering for the * netdevice not for a specific queue (at least I didn't see the requirement for * this). 
* * RCU usage: * The tun_file and tun_struct are loosely coupled, the pointer from one to the * other can only be read while rcu_read_lock or rtnl_lock is held. */ struct tun_file { struct sock sk; struct socket socket; struct tun_struct __rcu *tun; struct fasync_struct *fasync; /* only used for fasnyc */ unsigned int flags; union { u16 queue_index; unsigned int ifindex; }; struct napi_struct napi; bool napi_enabled; bool napi_frags_enabled; struct mutex napi_mutex; /* Protects access to the above napi */ struct list_head next; struct tun_struct *detached; struct ptr_ring tx_ring; struct xdp_rxq_info xdp_rxq; }; struct tun_page { struct page *page; int count; }; struct tun_flow_entry { struct hlist_node hash_link; struct rcu_head rcu; struct tun_struct *tun; u32 rxhash; u32 rps_rxhash; int queue_index; unsigned long updated ____cacheline_aligned_in_smp; }; #define TUN_NUM_FLOW_ENTRIES 1024 #define TUN_MASK_FLOW_ENTRIES (TUN_NUM_FLOW_ENTRIES - 1) struct tun_prog { struct rcu_head rcu; struct bpf_prog *prog; }; /* Since the socket were moved to tun_file, to preserve the behavior of persist * device, socket filter, sndbuf and vnet header size were restore when the * file were attached to a persist device. */ struct tun_struct { struct tun_file __rcu *tfiles[MAX_TAP_QUEUES]; unsigned int numqueues; unsigned int flags; kuid_t owner; kgid_t group; struct net_device *dev; netdev_features_t set_features; #define TUN_USER_FEATURES (NETIF_F_HW_CSUM|NETIF_F_TSO_ECN|NETIF_F_TSO| \ NETIF_F_TSO6 | NETIF_F_GSO_UDP_L4) int align; int vnet_hdr_sz; int sndbuf; struct tap_filter txflt; struct sock_fprog fprog; /* protected by rtnl lock */ bool filter_attached; u32 msg_enable; spinlock_t lock; struct hlist_head flows[TUN_NUM_FLOW_ENTRIES]; struct timer_list flow_gc_timer; unsigned long ageing_time; unsigned int numdisabled; struct list_head disabled; void *security; u32 flow_count; u32 rx_batched; atomic_long_t rx_frame_errors; struct bpf_prog __rcu *xdp_prog; struct tun_prog __rcu *steering_prog; struct tun_prog __rcu *filter_prog; struct ethtool_link_ksettings link_ksettings; /* init args */ struct file *file; struct ifreq *ifr; }; struct veth { __be16 h_vlan_proto; __be16 h_vlan_TCI; }; static void tun_flow_init(struct tun_struct *tun); static void tun_flow_uninit(struct tun_struct *tun); static int tun_napi_receive(struct napi_struct *napi, int budget) { struct tun_file *tfile = container_of(napi, struct tun_file, napi); struct sk_buff_head *queue = &tfile->sk.sk_write_queue; struct sk_buff_head process_queue; struct sk_buff *skb; int received = 0; __skb_queue_head_init(&process_queue); spin_lock(&queue->lock); skb_queue_splice_tail_init(queue, &process_queue); spin_unlock(&queue->lock); while (received < budget && (skb = __skb_dequeue(&process_queue))) { napi_gro_receive(napi, skb); ++received; } if (!skb_queue_empty(&process_queue)) { spin_lock(&queue->lock); skb_queue_splice(&process_queue, queue); spin_unlock(&queue->lock); } return received; } static int tun_napi_poll(struct napi_struct *napi, int budget) { unsigned int received; received = tun_napi_receive(napi, budget); if (received < budget) napi_complete_done(napi, received); return received; } static void tun_napi_init(struct tun_struct *tun, struct tun_file *tfile, bool napi_en, bool napi_frags) { tfile->napi_enabled = napi_en; tfile->napi_frags_enabled = napi_en && napi_frags; if (napi_en) { netif_napi_add_tx(tun->dev, &tfile->napi, tun_napi_poll); napi_enable(&tfile->napi); } } static void tun_napi_enable(struct tun_file *tfile) { if 
(tfile->napi_enabled) napi_enable(&tfile->napi); } static void tun_napi_disable(struct tun_file *tfile) { if (tfile->napi_enabled) napi_disable(&tfile->napi); } static void tun_napi_del(struct tun_file *tfile) { if (tfile->napi_enabled) netif_napi_del(&tfile->napi); } static bool tun_napi_frags_enabled(const struct tun_file *tfile) { return tfile->napi_frags_enabled; } #ifdef CONFIG_TUN_VNET_CROSS_LE static inline bool tun_legacy_is_little_endian(struct tun_struct *tun) { return tun->flags & TUN_VNET_BE ? false : virtio_legacy_is_little_endian(); } static long tun_get_vnet_be(struct tun_struct *tun, int __user *argp) { int be = !!(tun->flags & TUN_VNET_BE); if (put_user(be, argp)) return -EFAULT; return 0; } static long tun_set_vnet_be(struct tun_struct *tun, int __user *argp) { int be; if (get_user(be, argp)) return -EFAULT; if (be) tun->flags |= TUN_VNET_BE; else tun->flags &= ~TUN_VNET_BE; return 0; } #else static inline bool tun_legacy_is_little_endian(struct tun_struct *tun) { return virtio_legacy_is_little_endian(); } static long tun_get_vnet_be(struct tun_struct *tun, int __user *argp) { return -EINVAL; } static long tun_set_vnet_be(struct tun_struct *tun, int __user *argp) { return -EINVAL; } #endif /* CONFIG_TUN_VNET_CROSS_LE */ static inline bool tun_is_little_endian(struct tun_struct *tun) { return tun->flags & TUN_VNET_LE || tun_legacy_is_little_endian(tun); } static inline u16 tun16_to_cpu(struct tun_struct *tun, __virtio16 val) { return __virtio16_to_cpu(tun_is_little_endian(tun), val); } static inline __virtio16 cpu_to_tun16(struct tun_struct *tun, u16 val) { return __cpu_to_virtio16(tun_is_little_endian(tun), val); } static inline u32 tun_hashfn(u32 rxhash) { return rxhash & TUN_MASK_FLOW_ENTRIES; } static struct tun_flow_entry *tun_flow_find(struct hlist_head *head, u32 rxhash) { struct tun_flow_entry *e; hlist_for_each_entry_rcu(e, head, hash_link) { if (e->rxhash == rxhash) return e; } return NULL; } static struct tun_flow_entry *tun_flow_create(struct tun_struct *tun, struct hlist_head *head, u32 rxhash, u16 queue_index) { struct tun_flow_entry *e = kmalloc(sizeof(*e), GFP_ATOMIC); if (e) { netif_info(tun, tx_queued, tun->dev, "create flow: hash %u index %u\n", rxhash, queue_index); e->updated = jiffies; e->rxhash = rxhash; e->rps_rxhash = 0; e->queue_index = queue_index; e->tun = tun; hlist_add_head_rcu(&e->hash_link, head); ++tun->flow_count; } return e; } static void tun_flow_delete(struct tun_struct *tun, struct tun_flow_entry *e) { netif_info(tun, tx_queued, tun->dev, "delete flow: hash %u index %u\n", e->rxhash, e->queue_index); hlist_del_rcu(&e->hash_link); kfree_rcu(e, rcu); --tun->flow_count; } static void tun_flow_flush(struct tun_struct *tun) { int i; spin_lock_bh(&tun->lock); for (i = 0; i < TUN_NUM_FLOW_ENTRIES; i++) { struct tun_flow_entry *e; struct hlist_node *n; hlist_for_each_entry_safe(e, n, &tun->flows[i], hash_link) tun_flow_delete(tun, e); } spin_unlock_bh(&tun->lock); } static void tun_flow_delete_by_queue(struct tun_struct *tun, u16 queue_index) { int i; spin_lock_bh(&tun->lock); for (i = 0; i < TUN_NUM_FLOW_ENTRIES; i++) { struct tun_flow_entry *e; struct hlist_node *n; hlist_for_each_entry_safe(e, n, &tun->flows[i], hash_link) { if (e->queue_index == queue_index) tun_flow_delete(tun, e); } } spin_unlock_bh(&tun->lock); } static void tun_flow_cleanup(struct timer_list *t) { struct tun_struct *tun = from_timer(tun, t, flow_gc_timer); unsigned long delay = tun->ageing_time; unsigned long next_timer = jiffies + delay; unsigned long count = 0; int 
i; spin_lock(&tun->lock); for (i = 0; i < TUN_NUM_FLOW_ENTRIES; i++) { struct tun_flow_entry *e; struct hlist_node *n; hlist_for_each_entry_safe(e, n, &tun->flows[i], hash_link) { unsigned long this_timer; this_timer = e->updated + delay; if (time_before_eq(this_timer, jiffies)) { tun_flow_delete(tun, e); continue; } count++; if (time_before(this_timer, next_timer)) next_timer = this_timer; } } if (count) mod_timer(&tun->flow_gc_timer, round_jiffies_up(next_timer)); spin_unlock(&tun->lock); } static void tun_flow_update(struct tun_struct *tun, u32 rxhash, struct tun_file *tfile) { struct hlist_head *head; struct tun_flow_entry *e; unsigned long delay = tun->ageing_time; u16 queue_index = tfile->queue_index; head = &tun->flows[tun_hashfn(rxhash)]; rcu_read_lock(); e = tun_flow_find(head, rxhash); if (likely(e)) { /* TODO: keep queueing to old queue until it's empty? */ if (READ_ONCE(e->queue_index) != queue_index) WRITE_ONCE(e->queue_index, queue_index); if (e->updated != jiffies) e->updated = jiffies; sock_rps_record_flow_hash(e->rps_rxhash); } else { spin_lock_bh(&tun->lock); if (!tun_flow_find(head, rxhash) && tun->flow_count < MAX_TAP_FLOWS) tun_flow_create(tun, head, rxhash, queue_index); if (!timer_pending(&tun->flow_gc_timer)) mod_timer(&tun->flow_gc_timer, round_jiffies_up(jiffies + delay)); spin_unlock_bh(&tun->lock); } rcu_read_unlock(); } /* Save the hash received in the stack receive path and update the * flow_hash table accordingly. */ static inline void tun_flow_save_rps_rxhash(struct tun_flow_entry *e, u32 hash) { if (unlikely(e->rps_rxhash != hash)) e->rps_rxhash = hash; } /* We try to identify a flow through its rxhash. The reason that * we do not check rxq no. is because some cards(e.g 82599), chooses * the rxq based on the txq where the last packet of the flow comes. As * the userspace application move between processors, we may get a * different rxq no. here. 
*/ static u16 tun_automq_select_queue(struct tun_struct *tun, struct sk_buff *skb) { struct tun_flow_entry *e; u32 txq = 0; u32 numqueues = 0; numqueues = READ_ONCE(tun->numqueues); txq = __skb_get_hash_symmetric(skb); e = tun_flow_find(&tun->flows[tun_hashfn(txq)], txq); if (e) { tun_flow_save_rps_rxhash(e, txq); txq = e->queue_index; } else { /* use multiply and shift instead of expensive divide */ txq = ((u64)txq * numqueues) >> 32; } return txq; } static u16 tun_ebpf_select_queue(struct tun_struct *tun, struct sk_buff *skb) { struct tun_prog *prog; u32 numqueues; u16 ret = 0; numqueues = READ_ONCE(tun->numqueues); if (!numqueues) return 0; prog = rcu_dereference(tun->steering_prog); if (prog) ret = bpf_prog_run_clear_cb(prog->prog, skb); return ret % numqueues; } static u16 tun_select_queue(struct net_device *dev, struct sk_buff *skb, struct net_device *sb_dev) { struct tun_struct *tun = netdev_priv(dev); u16 ret; rcu_read_lock(); if (rcu_dereference(tun->steering_prog)) ret = tun_ebpf_select_queue(tun, skb); else ret = tun_automq_select_queue(tun, skb); rcu_read_unlock(); return ret; } static inline bool tun_not_capable(struct tun_struct *tun) { const struct cred *cred = current_cred(); struct net *net = dev_net(tun->dev); return ((uid_valid(tun->owner) && !uid_eq(cred->euid, tun->owner)) || (gid_valid(tun->group) && !in_egroup_p(tun->group))) && !ns_capable(net->user_ns, CAP_NET_ADMIN); } static void tun_set_real_num_queues(struct tun_struct *tun) { netif_set_real_num_tx_queues(tun->dev, tun->numqueues); netif_set_real_num_rx_queues(tun->dev, tun->numqueues); } static void tun_disable_queue(struct tun_struct *tun, struct tun_file *tfile) { tfile->detached = tun; list_add_tail(&tfile->next, &tun->disabled); ++tun->numdisabled; } static struct tun_struct *tun_enable_queue(struct tun_file *tfile) { struct tun_struct *tun = tfile->detached; tfile->detached = NULL; list_del_init(&tfile->next); --tun->numdisabled; return tun; } void tun_ptr_free(void *ptr) { if (!ptr) return; if (tun_is_xdp_frame(ptr)) { struct xdp_frame *xdpf = tun_ptr_to_xdp(ptr); xdp_return_frame(xdpf); } else { __skb_array_destroy_skb(ptr); } } EXPORT_SYMBOL_GPL(tun_ptr_free); static void tun_queue_purge(struct tun_file *tfile) { void *ptr; while ((ptr = ptr_ring_consume(&tfile->tx_ring)) != NULL) tun_ptr_free(ptr); skb_queue_purge(&tfile->sk.sk_write_queue); skb_queue_purge(&tfile->sk.sk_error_queue); } static void __tun_detach(struct tun_file *tfile, bool clean) { struct tun_file *ntfile; struct tun_struct *tun; tun = rtnl_dereference(tfile->tun); if (tun && clean) { if (!tfile->detached) tun_napi_disable(tfile); tun_napi_del(tfile); } if (tun && !tfile->detached) { u16 index = tfile->queue_index; BUG_ON(index >= tun->numqueues); rcu_assign_pointer(tun->tfiles[index], tun->tfiles[tun->numqueues - 1]); ntfile = rtnl_dereference(tun->tfiles[index]); ntfile->queue_index = index; rcu_assign_pointer(tun->tfiles[tun->numqueues - 1], NULL); --tun->numqueues; if (clean) { RCU_INIT_POINTER(tfile->tun, NULL); sock_put(&tfile->sk); } else { tun_disable_queue(tun, tfile); tun_napi_disable(tfile); } synchronize_net(); tun_flow_delete_by_queue(tun, tun->numqueues + 1); /* Drop read queue */ tun_queue_purge(tfile); tun_set_real_num_queues(tun); } else if (tfile->detached && clean) { tun = tun_enable_queue(tfile); sock_put(&tfile->sk); } if (clean) { if (tun && tun->numqueues == 0 && tun->numdisabled == 0) { netif_carrier_off(tun->dev); if (!(tun->flags & IFF_PERSIST) && tun->dev->reg_state == NETREG_REGISTERED) 
unregister_netdevice(tun->dev); } if (tun) xdp_rxq_info_unreg(&tfile->xdp_rxq); ptr_ring_cleanup(&tfile->tx_ring, tun_ptr_free); } } static void tun_detach(struct tun_file *tfile, bool clean) { struct tun_struct *tun; struct net_device *dev; rtnl_lock(); tun = rtnl_dereference(tfile->tun); dev = tun ? tun->dev : NULL; __tun_detach(tfile, clean); if (dev) netdev_state_change(dev); rtnl_unlock(); if (clean) sock_put(&tfile->sk); } static void tun_detach_all(struct net_device *dev) { struct tun_struct *tun = netdev_priv(dev); struct tun_file *tfile, *tmp; int i, n = tun->numqueues; for (i = 0; i < n; i++) { tfile = rtnl_dereference(tun->tfiles[i]); BUG_ON(!tfile); tun_napi_disable(tfile); tfile->socket.sk->sk_shutdown = RCV_SHUTDOWN; tfile->socket.sk->sk_data_ready(tfile->socket.sk); RCU_INIT_POINTER(tfile->tun, NULL); --tun->numqueues; } list_for_each_entry(tfile, &tun->disabled, next) { tfile->socket.sk->sk_shutdown = RCV_SHUTDOWN; tfile->socket.sk->sk_data_ready(tfile->socket.sk); RCU_INIT_POINTER(tfile->tun, NULL); } BUG_ON(tun->numqueues != 0); synchronize_net(); for (i = 0; i < n; i++) { tfile = rtnl_dereference(tun->tfiles[i]); tun_napi_del(tfile); /* Drop read queue */ tun_queue_purge(tfile); xdp_rxq_info_unreg(&tfile->xdp_rxq); sock_put(&tfile->sk); } list_for_each_entry_safe(tfile, tmp, &tun->disabled, next) { tun_napi_del(tfile); tun_enable_queue(tfile); tun_queue_purge(tfile); xdp_rxq_info_unreg(&tfile->xdp_rxq); sock_put(&tfile->sk); } BUG_ON(tun->numdisabled != 0); if (tun->flags & IFF_PERSIST) module_put(THIS_MODULE); } static int tun_attach(struct tun_struct *tun, struct file *file, bool skip_filter, bool napi, bool napi_frags, bool publish_tun) { struct tun_file *tfile = file->private_data; struct net_device *dev = tun->dev; int err; err = security_tun_dev_attach(tfile->socket.sk, tun->security); if (err < 0) goto out; err = -EINVAL; if (rtnl_dereference(tfile->tun) && !tfile->detached) goto out; err = -EBUSY; if (!(tun->flags & IFF_MULTI_QUEUE) && tun->numqueues == 1) goto out; err = -E2BIG; if (!tfile->detached && tun->numqueues + tun->numdisabled == MAX_TAP_QUEUES) goto out; err = 0; /* Re-attach the filter to persist device */ if (!skip_filter && (tun->filter_attached == true)) { lock_sock(tfile->socket.sk); err = sk_attach_filter(&tun->fprog, tfile->socket.sk); release_sock(tfile->socket.sk); if (!err) goto out; } if (!tfile->detached && ptr_ring_resize(&tfile->tx_ring, dev->tx_queue_len, GFP_KERNEL, tun_ptr_free)) { err = -ENOMEM; goto out; } tfile->queue_index = tun->numqueues; tfile->socket.sk->sk_shutdown &= ~RCV_SHUTDOWN; if (tfile->detached) { /* Re-attach detached tfile, updating XDP queue_index */ WARN_ON(!xdp_rxq_info_is_reg(&tfile->xdp_rxq)); if (tfile->xdp_rxq.queue_index != tfile->queue_index) tfile->xdp_rxq.queue_index = tfile->queue_index; } else { /* Setup XDP RX-queue info, for new tfile getting attached */ err = xdp_rxq_info_reg(&tfile->xdp_rxq, tun->dev, tfile->queue_index, 0); if (err < 0) goto out; err = xdp_rxq_info_reg_mem_model(&tfile->xdp_rxq, MEM_TYPE_PAGE_SHARED, NULL); if (err < 0) { xdp_rxq_info_unreg(&tfile->xdp_rxq); goto out; } err = 0; } if (tfile->detached) { tun_enable_queue(tfile); tun_napi_enable(tfile); } else { sock_hold(&tfile->sk); tun_napi_init(tun, tfile, napi, napi_frags); } if (rtnl_dereference(tun->xdp_prog)) sock_set_flag(&tfile->sk, SOCK_XDP); /* device is allowed to go away first, so no need to hold extra * refcnt. 
*/ /* Publish tfile->tun and tun->tfiles only after we've fully * initialized tfile; otherwise we risk using half-initialized * object. */ if (publish_tun) rcu_assign_pointer(tfile->tun, tun); rcu_assign_pointer(tun->tfiles[tun->numqueues], tfile); tun->numqueues++; tun_set_real_num_queues(tun); out: return err; } static struct tun_struct *tun_get(struct tun_file *tfile) { struct tun_struct *tun; rcu_read_lock(); tun = rcu_dereference(tfile->tun); if (tun) dev_hold(tun->dev); rcu_read_unlock(); return tun; } static void tun_put(struct tun_struct *tun) { dev_put(tun->dev); } /* TAP filtering */ static void addr_hash_set(u32 *mask, const u8 *addr) { int n = ether_crc(ETH_ALEN, addr) >> 26; mask[n >> 5] |= (1 << (n & 31)); } static unsigned int addr_hash_test(const u32 *mask, const u8 *addr) { int n = ether_crc(ETH_ALEN, addr) >> 26; return mask[n >> 5] & (1 << (n & 31)); } static int update_filter(struct tap_filter *filter, void __user *arg) { struct { u8 u[ETH_ALEN]; } *addr; struct tun_filter uf; int err, alen, n, nexact; if (copy_from_user(&uf, arg, sizeof(uf))) return -EFAULT; if (!uf.count) { /* Disabled */ filter->count = 0; return 0; } alen = ETH_ALEN * uf.count; addr = memdup_user(arg + sizeof(uf), alen); if (IS_ERR(addr)) return PTR_ERR(addr); /* The filter is updated without holding any locks. Which is * perfectly safe. We disable it first and in the worst * case we'll accept a few undesired packets. */ filter->count = 0; wmb(); /* Use first set of addresses as an exact filter */ for (n = 0; n < uf.count && n < FLT_EXACT_COUNT; n++) memcpy(filter->addr[n], addr[n].u, ETH_ALEN); nexact = n; /* Remaining multicast addresses are hashed, * unicast will leave the filter disabled. */ memset(filter->mask, 0, sizeof(filter->mask)); for (; n < uf.count; n++) { if (!is_multicast_ether_addr(addr[n].u)) { err = 0; /* no filter */ goto free_addr; } addr_hash_set(filter->mask, addr[n].u); } /* For ALLMULTI just set the mask to all ones. * This overrides the mask populated above. */ if ((uf.flags & TUN_FLT_ALLMULTI)) memset(filter->mask, ~0, sizeof(filter->mask)); /* Now enable the filter */ wmb(); filter->count = nexact; /* Return the number of exact filters */ err = nexact; free_addr: kfree(addr); return err; } /* Returns: 0 - drop, !=0 - accept */ static int run_filter(struct tap_filter *filter, const struct sk_buff *skb) { /* Cannot use eth_hdr(skb) here because skb_mac_hdr() is incorrect * at this point. */ struct ethhdr *eh = (struct ethhdr *) skb->data; int i; /* Exact match */ for (i = 0; i < filter->count; i++) if (ether_addr_equal(eh->h_dest, filter->addr[i])) return 1; /* Inexact match (multicast only) */ if (is_multicast_ether_addr(eh->h_dest)) return addr_hash_test(filter->mask, eh->h_dest); return 0; } /* * Checks whether the packet is accepted or not. 
* Returns: 0 - drop, !=0 - accept */ static int check_filter(struct tap_filter *filter, const struct sk_buff *skb) { if (!filter->count) return 1; return run_filter(filter, skb); } /* Network device part of the driver */ static const struct ethtool_ops tun_ethtool_ops; static int tun_net_init(struct net_device *dev) { struct tun_struct *tun = netdev_priv(dev); struct ifreq *ifr = tun->ifr; int err; dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats); if (!dev->tstats) return -ENOMEM; spin_lock_init(&tun->lock); err = security_tun_dev_alloc_security(&tun->security); if (err < 0) { free_percpu(dev->tstats); return err; } tun_flow_init(tun); dev->hw_features = NETIF_F_SG | NETIF_F_FRAGLIST | TUN_USER_FEATURES | NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_STAG_TX; dev->features = dev->hw_features | NETIF_F_LLTX; dev->vlan_features = dev->features & ~(NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_STAG_TX); tun->flags = (tun->flags & ~TUN_FEATURES) | (ifr->ifr_flags & TUN_FEATURES); INIT_LIST_HEAD(&tun->disabled); err = tun_attach(tun, tun->file, false, ifr->ifr_flags & IFF_NAPI, ifr->ifr_flags & IFF_NAPI_FRAGS, false); if (err < 0) { tun_flow_uninit(tun); security_tun_dev_free_security(tun->security); free_percpu(dev->tstats); return err; } return 0; } /* Net device detach from fd. */ static void tun_net_uninit(struct net_device *dev) { tun_detach_all(dev); } /* Net device open. */ static int tun_net_open(struct net_device *dev) { netif_tx_start_all_queues(dev); return 0; } /* Net device close. */ static int tun_net_close(struct net_device *dev) { netif_tx_stop_all_queues(dev); return 0; } /* Net device start xmit */ static void tun_automq_xmit(struct tun_struct *tun, struct sk_buff *skb) { #ifdef CONFIG_RPS if (tun->numqueues == 1 && static_branch_unlikely(&rps_needed)) { /* Select queue was not called for the skbuff, so we extract the * RPS hash and save it into the flow_table here. */ struct tun_flow_entry *e; __u32 rxhash; rxhash = __skb_get_hash_symmetric(skb); e = tun_flow_find(&tun->flows[tun_hashfn(rxhash)], rxhash); if (e) tun_flow_save_rps_rxhash(e, rxhash); } #endif } static unsigned int run_ebpf_filter(struct tun_struct *tun, struct sk_buff *skb, int len) { struct tun_prog *prog = rcu_dereference(tun->filter_prog); if (prog) len = bpf_prog_run_clear_cb(prog->prog, skb); return len; } /* Net device start xmit */ static netdev_tx_t tun_net_xmit(struct sk_buff *skb, struct net_device *dev) { struct tun_struct *tun = netdev_priv(dev); enum skb_drop_reason drop_reason; int txq = skb->queue_mapping; struct netdev_queue *queue; struct tun_file *tfile; int len = skb->len; rcu_read_lock(); tfile = rcu_dereference(tun->tfiles[txq]); /* Drop packet if interface is not attached */ if (!tfile) { drop_reason = SKB_DROP_REASON_DEV_READY; goto drop; } if (!rcu_dereference(tun->steering_prog)) tun_automq_xmit(tun, skb); netif_info(tun, tx_queued, tun->dev, "%s %d\n", __func__, skb->len); /* Drop if the filter does not like it. * This is a noop if the filter is disabled. * Filter can be enabled only for the TAP devices. 
*/ if (!check_filter(&tun->txflt, skb)) { drop_reason = SKB_DROP_REASON_TAP_TXFILTER; goto drop; } if (tfile->socket.sk->sk_filter && sk_filter(tfile->socket.sk, skb)) { drop_reason = SKB_DROP_REASON_SOCKET_FILTER; goto drop; } len = run_ebpf_filter(tun, skb, len); if (len == 0) { drop_reason = SKB_DROP_REASON_TAP_FILTER; goto drop; } if (pskb_trim(skb, len)) { drop_reason = SKB_DROP_REASON_NOMEM; goto drop; } if (unlikely(skb_orphan_frags_rx(skb, GFP_ATOMIC))) { drop_reason = SKB_DROP_REASON_SKB_UCOPY_FAULT; goto drop; } skb_tx_timestamp(skb); /* Orphan the skb - required as we might hang on to it * for indefinite time. */ skb_orphan(skb); nf_reset_ct(skb); if (ptr_ring_produce(&tfile->tx_ring, skb)) { drop_reason = SKB_DROP_REASON_FULL_RING; goto drop; } /* NETIF_F_LLTX requires to do our own update of trans_start */ queue = netdev_get_tx_queue(dev, txq); txq_trans_cond_update(queue); /* Notify and wake up reader process */ if (tfile->flags & TUN_FASYNC) kill_fasync(&tfile->fasync, SIGIO, POLL_IN); tfile->socket.sk->sk_data_ready(tfile->socket.sk); rcu_read_unlock(); return NETDEV_TX_OK; drop: dev_core_stats_tx_dropped_inc(dev); skb_tx_error(skb); kfree_skb_reason(skb, drop_reason); rcu_read_unlock(); return NET_XMIT_DROP; } static void tun_net_mclist(struct net_device *dev) { /* * This callback is supposed to deal with mc filter in * _rx_ path and has nothing to do with the _tx_ path. * In rx path we always accept everything userspace gives us. */ } static netdev_features_t tun_net_fix_features(struct net_device *dev, netdev_features_t features) { struct tun_struct *tun = netdev_priv(dev); return (features & tun->set_features) | (features & ~TUN_USER_FEATURES); } static void tun_set_headroom(struct net_device *dev, int new_hr) { struct tun_struct *tun = netdev_priv(dev); if (new_hr < NET_SKB_PAD) new_hr = NET_SKB_PAD; tun->align = new_hr; } static void tun_net_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats) { struct tun_struct *tun = netdev_priv(dev); dev_get_tstats64(dev, stats); stats->rx_frame_errors += (unsigned long)atomic_long_read(&tun->rx_frame_errors); } static int tun_xdp_set(struct net_device *dev, struct bpf_prog *prog, struct netlink_ext_ack *extack) { struct tun_struct *tun = netdev_priv(dev); struct tun_file *tfile; struct bpf_prog *old_prog; int i; old_prog = rtnl_dereference(tun->xdp_prog); rcu_assign_pointer(tun->xdp_prog, prog); if (old_prog) bpf_prog_put(old_prog); for (i = 0; i < tun->numqueues; i++) { tfile = rtnl_dereference(tun->tfiles[i]); if (prog) sock_set_flag(&tfile->sk, SOCK_XDP); else sock_reset_flag(&tfile->sk, SOCK_XDP); } list_for_each_entry(tfile, &tun->disabled, next) { if (prog) sock_set_flag(&tfile->sk, SOCK_XDP); else sock_reset_flag(&tfile->sk, SOCK_XDP); } return 0; } static int tun_xdp(struct net_device *dev, struct netdev_bpf *xdp) { switch (xdp->command) { case XDP_SETUP_PROG: return tun_xdp_set(dev, xdp->prog, xdp->extack); default: return -EINVAL; } } static int tun_net_change_carrier(struct net_device *dev, bool new_carrier) { if (new_carrier) { struct tun_struct *tun = netdev_priv(dev); if (!tun->numqueues) return -EPERM; netif_carrier_on(dev); } else { netif_carrier_off(dev); } return 0; } static const struct net_device_ops tun_netdev_ops = { .ndo_init = tun_net_init, .ndo_uninit = tun_net_uninit, .ndo_open = tun_net_open, .ndo_stop = tun_net_close, .ndo_start_xmit = tun_net_xmit, .ndo_fix_features = tun_net_fix_features, .ndo_select_queue = tun_select_queue, .ndo_set_rx_headroom = tun_set_headroom, .ndo_get_stats64 = 
tun_net_get_stats64, .ndo_change_carrier = tun_net_change_carrier, }; static void __tun_xdp_flush_tfile(struct tun_file *tfile) { /* Notify and wake up reader process */ if (tfile->flags & TUN_FASYNC) kill_fasync(&tfile->fasync, SIGIO, POLL_IN); tfile->socket.sk->sk_data_ready(tfile->socket.sk); } static int tun_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames, u32 flags) { struct tun_struct *tun = netdev_priv(dev); struct tun_file *tfile; u32 numqueues; int nxmit = 0; int i; if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK)) return -EINVAL; rcu_read_lock(); resample: numqueues = READ_ONCE(tun->numqueues); if (!numqueues) { rcu_read_unlock(); return -ENXIO; /* Caller will free/return all frames */ } tfile = rcu_dereference(tun->tfiles[smp_processor_id() % numqueues]); if (unlikely(!tfile)) goto resample; spin_lock(&tfile->tx_ring.producer_lock); for (i = 0; i < n; i++) { struct xdp_frame *xdp = frames[i]; /* Encode the XDP flag into lowest bit for consumer to differ * XDP buffer from sk_buff. */ void *frame = tun_xdp_to_ptr(xdp); if (__ptr_ring_produce(&tfile->tx_ring, frame)) { dev_core_stats_tx_dropped_inc(dev); break; } nxmit++; } spin_unlock(&tfile->tx_ring.producer_lock); if (flags & XDP_XMIT_FLUSH) __tun_xdp_flush_tfile(tfile); rcu_read_unlock(); return nxmit; } static int tun_xdp_tx(struct net_device *dev, struct xdp_buff *xdp) { struct xdp_frame *frame = xdp_convert_buff_to_frame(xdp); int nxmit; if (unlikely(!frame)) return -EOVERFLOW; nxmit = tun_xdp_xmit(dev, 1, &frame, XDP_XMIT_FLUSH); if (!nxmit) xdp_return_frame_rx_napi(frame); return nxmit; } static const struct net_device_ops tap_netdev_ops = { .ndo_init = tun_net_init, .ndo_uninit = tun_net_uninit, .ndo_open = tun_net_open, .ndo_stop = tun_net_close, .ndo_start_xmit = tun_net_xmit, .ndo_fix_features = tun_net_fix_features, .ndo_set_rx_mode = tun_net_mclist, .ndo_set_mac_address = eth_mac_addr, .ndo_validate_addr = eth_validate_addr, .ndo_select_queue = tun_select_queue, .ndo_features_check = passthru_features_check, .ndo_set_rx_headroom = tun_set_headroom, .ndo_get_stats64 = dev_get_tstats64, .ndo_bpf = tun_xdp, .ndo_xdp_xmit = tun_xdp_xmit, .ndo_change_carrier = tun_net_change_carrier, }; static void tun_flow_init(struct tun_struct *tun) { int i; for (i = 0; i < TUN_NUM_FLOW_ENTRIES; i++) INIT_HLIST_HEAD(&tun->flows[i]); tun->ageing_time = TUN_FLOW_EXPIRE; timer_setup(&tun->flow_gc_timer, tun_flow_cleanup, 0); mod_timer(&tun->flow_gc_timer, round_jiffies_up(jiffies + tun->ageing_time)); } static void tun_flow_uninit(struct tun_struct *tun) { del_timer_sync(&tun->flow_gc_timer); tun_flow_flush(tun); } #define MIN_MTU 68 #define MAX_MTU 65535 /* Initialize net device. */ static void tun_net_initialize(struct net_device *dev) { struct tun_struct *tun = netdev_priv(dev); switch (tun->flags & TUN_TYPE_MASK) { case IFF_TUN: dev->netdev_ops = &tun_netdev_ops; dev->header_ops = &ip_tunnel_header_ops; /* Point-to-Point TUN Device */ dev->hard_header_len = 0; dev->addr_len = 0; dev->mtu = 1500; /* Zero header length */ dev->type = ARPHRD_NONE; dev->flags = IFF_POINTOPOINT | IFF_NOARP | IFF_MULTICAST; break; case IFF_TAP: dev->netdev_ops = &tap_netdev_ops; /* Ethernet TAP Device */ ether_setup(dev); dev->priv_flags &= ~IFF_TX_SKB_SHARING; dev->priv_flags |= IFF_LIVE_ADDR_CHANGE; eth_hw_addr_random(dev); /* Currently tun does not support XDP, only tap does. 
*/ dev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT | NETDEV_XDP_ACT_NDO_XMIT; break; } dev->min_mtu = MIN_MTU; dev->max_mtu = MAX_MTU - dev->hard_header_len; } static bool tun_sock_writeable(struct tun_struct *tun, struct tun_file *tfile) { struct sock *sk = tfile->socket.sk; return (tun->dev->flags & IFF_UP) && sock_writeable(sk); } /* Character device part */ /* Poll */ static __poll_t tun_chr_poll(struct file *file, poll_table *wait) { struct tun_file *tfile = file->private_data; struct tun_struct *tun = tun_get(tfile); struct sock *sk; __poll_t mask = 0; if (!tun) return EPOLLERR; sk = tfile->socket.sk; poll_wait(file, sk_sleep(sk), wait); if (!ptr_ring_empty(&tfile->tx_ring)) mask |= EPOLLIN | EPOLLRDNORM; /* Make sure SOCKWQ_ASYNC_NOSPACE is set if not writable to * guarantee EPOLLOUT to be raised by either here or * tun_sock_write_space(). Then process could get notification * after it writes to a down device and meets -EIO. */ if (tun_sock_writeable(tun, tfile) || (!test_and_set_bit(SOCKWQ_ASYNC_NOSPACE, &sk->sk_socket->flags) && tun_sock_writeable(tun, tfile))) mask |= EPOLLOUT | EPOLLWRNORM; if (tun->dev->reg_state != NETREG_REGISTERED) mask = EPOLLERR; tun_put(tun); return mask; } static struct sk_buff *tun_napi_alloc_frags(struct tun_file *tfile, size_t len, const struct iov_iter *it) { struct sk_buff *skb; size_t linear; int err; int i; if (it->nr_segs > MAX_SKB_FRAGS + 1 || len > (ETH_MAX_MTU - NET_SKB_PAD - NET_IP_ALIGN)) return ERR_PTR(-EMSGSIZE); local_bh_disable(); skb = napi_get_frags(&tfile->napi); local_bh_enable(); if (!skb) return ERR_PTR(-ENOMEM); linear = iov_iter_single_seg_count(it); err = __skb_grow(skb, linear); if (err) goto free; skb->len = len; skb->data_len = len - linear; skb->truesize += skb->data_len; for (i = 1; i < it->nr_segs; i++) { const struct iovec *iov = iter_iov(it); size_t fragsz = iov->iov_len; struct page *page; void *frag; if (fragsz == 0 || fragsz > PAGE_SIZE) { err = -EINVAL; goto free; } frag = netdev_alloc_frag(fragsz); if (!frag) { err = -ENOMEM; goto free; } page = virt_to_head_page(frag); skb_fill_page_desc(skb, i - 1, page, frag - page_address(page), fragsz); } return skb; free: /* frees skb and all frags allocated with napi_alloc_frag() */ napi_free_frags(&tfile->napi); return ERR_PTR(err); } /* prepad is the amount to reserve at front. len is length after that. * linear is a hint as to how much to copy (usually headers). */ static struct sk_buff *tun_alloc_skb(struct tun_file *tfile, size_t prepad, size_t len, size_t linear, int noblock) { struct sock *sk = tfile->socket.sk; struct sk_buff *skb; int err; /* Under a page? Don't bother with paged skb. 
*/ if (prepad + len < PAGE_SIZE) linear = len; if (len - linear > MAX_SKB_FRAGS * (PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER)) linear = len - MAX_SKB_FRAGS * (PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER); skb = sock_alloc_send_pskb(sk, prepad + linear, len - linear, noblock, &err, PAGE_ALLOC_COSTLY_ORDER); if (!skb) return ERR_PTR(err); skb_reserve(skb, prepad); skb_put(skb, linear); skb->data_len = len - linear; skb->len += len - linear; return skb; } static void tun_rx_batched(struct tun_struct *tun, struct tun_file *tfile, struct sk_buff *skb, int more) { struct sk_buff_head *queue = &tfile->sk.sk_write_queue; struct sk_buff_head process_queue; u32 rx_batched = tun->rx_batched; bool rcv = false; if (!rx_batched || (!more && skb_queue_empty(queue))) { local_bh_disable(); skb_record_rx_queue(skb, tfile->queue_index); netif_receive_skb(skb); local_bh_enable(); return; } spin_lock(&queue->lock); if (!more || skb_queue_len(queue) == rx_batched) { __skb_queue_head_init(&process_queue); skb_queue_splice_tail_init(queue, &process_queue); rcv = true; } else { __skb_queue_tail(queue, skb); } spin_unlock(&queue->lock); if (rcv) { struct sk_buff *nskb; local_bh_disable(); while ((nskb = __skb_dequeue(&process_queue))) { skb_record_rx_queue(nskb, tfile->queue_index); netif_receive_skb(nskb); } skb_record_rx_queue(skb, tfile->queue_index); netif_receive_skb(skb); local_bh_enable(); } } static bool tun_can_build_skb(struct tun_struct *tun, struct tun_file *tfile, int len, int noblock, bool zerocopy) { if ((tun->flags & TUN_TYPE_MASK) != IFF_TAP) return false; if (tfile->socket.sk->sk_sndbuf != INT_MAX) return false; if (!noblock) return false; if (zerocopy) return false; if (SKB_DATA_ALIGN(len + TUN_RX_PAD + XDP_PACKET_HEADROOM) + SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) > PAGE_SIZE) return false; return true; } static struct sk_buff *__tun_build_skb(struct tun_file *tfile, struct page_frag *alloc_frag, char *buf, int buflen, int len, int pad) { struct sk_buff *skb = build_skb(buf, buflen); if (!skb) return ERR_PTR(-ENOMEM); skb_reserve(skb, pad); skb_put(skb, len); skb_set_owner_w(skb, tfile->socket.sk); get_page(alloc_frag->page); alloc_frag->offset += buflen; return skb; } static int tun_xdp_act(struct tun_struct *tun, struct bpf_prog *xdp_prog, struct xdp_buff *xdp, u32 act) { int err; switch (act) { case XDP_REDIRECT: err = xdp_do_redirect(tun->dev, xdp, xdp_prog); if (err) return err; break; case XDP_TX: err = tun_xdp_tx(tun->dev, xdp); if (err < 0) return err; break; case XDP_PASS: break; default: bpf_warn_invalid_xdp_action(tun->dev, xdp_prog, act); fallthrough; case XDP_ABORTED: trace_xdp_exception(tun->dev, xdp_prog, act); fallthrough; case XDP_DROP: dev_core_stats_rx_dropped_inc(tun->dev); break; } return act; } static struct sk_buff *tun_build_skb(struct tun_struct *tun, struct tun_file *tfile, struct iov_iter *from, struct virtio_net_hdr *hdr, int len, int *skb_xdp) { struct page_frag *alloc_frag = &current->task_frag; struct bpf_prog *xdp_prog; int buflen = SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); char *buf; size_t copied; int pad = TUN_RX_PAD; int err = 0; rcu_read_lock(); xdp_prog = rcu_dereference(tun->xdp_prog); if (xdp_prog) pad += XDP_PACKET_HEADROOM; buflen += SKB_DATA_ALIGN(len + pad); rcu_read_unlock(); alloc_frag->offset = ALIGN((u64)alloc_frag->offset, SMP_CACHE_BYTES); if (unlikely(!skb_page_frag_refill(buflen, alloc_frag, GFP_KERNEL))) return ERR_PTR(-ENOMEM); buf = (char *)page_address(alloc_frag->page) + alloc_frag->offset; copied = copy_page_from_iter(alloc_frag->page, 
alloc_frag->offset + pad, len, from); if (copied != len) return ERR_PTR(-EFAULT); /* There's a small window that XDP may be set after the check * of xdp_prog above, this should be rare and for simplicity * we do XDP on skb in case the headroom is not enough. */ if (hdr->gso_type || !xdp_prog) { *skb_xdp = 1; return __tun_build_skb(tfile, alloc_frag, buf, buflen, len, pad); } *skb_xdp = 0; local_bh_disable(); rcu_read_lock(); xdp_prog = rcu_dereference(tun->xdp_prog); if (xdp_prog) { struct xdp_buff xdp; u32 act; xdp_init_buff(&xdp, buflen, &tfile->xdp_rxq); xdp_prepare_buff(&xdp, buf, pad, len, false); act = bpf_prog_run_xdp(xdp_prog, &xdp); if (act == XDP_REDIRECT || act == XDP_TX) { get_page(alloc_frag->page); alloc_frag->offset += buflen; } err = tun_xdp_act(tun, xdp_prog, &xdp, act); if (err < 0) { if (act == XDP_REDIRECT || act == XDP_TX) put_page(alloc_frag->page); goto out; } if (err == XDP_REDIRECT) xdp_do_flush(); if (err != XDP_PASS) goto out; pad = xdp.data - xdp.data_hard_start; len = xdp.data_end - xdp.data; } rcu_read_unlock(); local_bh_enable(); return __tun_build_skb(tfile, alloc_frag, buf, buflen, len, pad); out: rcu_read_unlock(); local_bh_enable(); return NULL; } /* Get packet from user space buffer */ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile, void *msg_control, struct iov_iter *from, int noblock, bool more) { struct tun_pi pi = { 0, cpu_to_be16(ETH_P_IP) }; struct sk_buff *skb; size_t total_len = iov_iter_count(from); size_t len = total_len, align = tun->align, linear; struct virtio_net_hdr gso = { 0 }; int good_linear; int copylen; bool zerocopy = false; int err; u32 rxhash = 0; int skb_xdp = 1; bool frags = tun_napi_frags_enabled(tfile); enum skb_drop_reason drop_reason = SKB_DROP_REASON_NOT_SPECIFIED; if (!(tun->flags & IFF_NO_PI)) { if (len < sizeof(pi)) return -EINVAL; len -= sizeof(pi); if (!copy_from_iter_full(&pi, sizeof(pi), from)) return -EFAULT; } if (tun->flags & IFF_VNET_HDR) { int vnet_hdr_sz = READ_ONCE(tun->vnet_hdr_sz); if (len < vnet_hdr_sz) return -EINVAL; len -= vnet_hdr_sz; if (!copy_from_iter_full(&gso, sizeof(gso), from)) return -EFAULT; if ((gso.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) && tun16_to_cpu(tun, gso.csum_start) + tun16_to_cpu(tun, gso.csum_offset) + 2 > tun16_to_cpu(tun, gso.hdr_len)) gso.hdr_len = cpu_to_tun16(tun, tun16_to_cpu(tun, gso.csum_start) + tun16_to_cpu(tun, gso.csum_offset) + 2); if (tun16_to_cpu(tun, gso.hdr_len) > len) return -EINVAL; iov_iter_advance(from, vnet_hdr_sz - sizeof(gso)); } if ((tun->flags & TUN_TYPE_MASK) == IFF_TAP) { align += NET_IP_ALIGN; if (unlikely(len < ETH_HLEN || (gso.hdr_len && tun16_to_cpu(tun, gso.hdr_len) < ETH_HLEN))) return -EINVAL; } good_linear = SKB_MAX_HEAD(align); if (msg_control) { struct iov_iter i = *from; /* There are 256 bytes to be copied in skb, so there is * enough room for skb expand head in case it is used. * The rest of the buffer is mapped from userspace. */ copylen = gso.hdr_len ? tun16_to_cpu(tun, gso.hdr_len) : GOODCOPY_LEN; if (copylen > good_linear) copylen = good_linear; linear = copylen; iov_iter_advance(&i, copylen); if (iov_iter_npages(&i, INT_MAX) <= MAX_SKB_FRAGS) zerocopy = true; } if (!frags && tun_can_build_skb(tun, tfile, len, noblock, zerocopy)) { /* For the packet that is not easy to be processed * (e.g gso or jumbo packet), we will do it at after * skb was created with generic XDP routine. 
*/ skb = tun_build_skb(tun, tfile, from, &gso, len, &skb_xdp); err = PTR_ERR_OR_ZERO(skb); if (err) goto drop; if (!skb) return total_len; } else { if (!zerocopy) { copylen = len; if (tun16_to_cpu(tun, gso.hdr_len) > good_linear) linear = good_linear; else linear = tun16_to_cpu(tun, gso.hdr_len); } if (frags) { mutex_lock(&tfile->napi_mutex); skb = tun_napi_alloc_frags(tfile, copylen, from); /* tun_napi_alloc_frags() enforces a layout for the skb. * If zerocopy is enabled, then this layout will be * overwritten by zerocopy_sg_from_iter(). */ zerocopy = false; } else { if (!linear) linear = min_t(size_t, good_linear, copylen); skb = tun_alloc_skb(tfile, align, copylen, linear, noblock); } err = PTR_ERR_OR_ZERO(skb); if (err) goto drop; if (zerocopy) err = zerocopy_sg_from_iter(skb, from); else err = skb_copy_datagram_from_iter(skb, 0, from, len); if (err) { err = -EFAULT; drop_reason = SKB_DROP_REASON_SKB_UCOPY_FAULT; goto drop; } } if (virtio_net_hdr_to_skb(skb, &gso, tun_is_little_endian(tun))) { atomic_long_inc(&tun->rx_frame_errors); err = -EINVAL; goto free_skb; } switch (tun->flags & TUN_TYPE_MASK) { case IFF_TUN: if (tun->flags & IFF_NO_PI) { u8 ip_version = skb->len ? (skb->data[0] >> 4) : 0; switch (ip_version) { case 4: pi.proto = htons(ETH_P_IP); break; case 6: pi.proto = htons(ETH_P_IPV6); break; default: err = -EINVAL; goto drop; } } skb_reset_mac_header(skb); skb->protocol = pi.proto; skb->dev = tun->dev; break; case IFF_TAP: if (frags && !pskb_may_pull(skb, ETH_HLEN)) { err = -ENOMEM; drop_reason = SKB_DROP_REASON_HDR_TRUNC; goto drop; } skb->protocol = eth_type_trans(skb, tun->dev); break; } /* copy skb_ubuf_info for callback when skb has no error */ if (zerocopy) { skb_zcopy_init(skb, msg_control); } else if (msg_control) { struct ubuf_info *uarg = msg_control; uarg->callback(NULL, uarg, false); } skb_reset_network_header(skb); skb_probe_transport_header(skb); skb_record_rx_queue(skb, tfile->queue_index); if (skb_xdp) { struct bpf_prog *xdp_prog; int ret; local_bh_disable(); rcu_read_lock(); xdp_prog = rcu_dereference(tun->xdp_prog); if (xdp_prog) { ret = do_xdp_generic(xdp_prog, skb); if (ret != XDP_PASS) { rcu_read_unlock(); local_bh_enable(); goto unlock_frags; } } rcu_read_unlock(); local_bh_enable(); } /* Compute the costly rx hash only if needed for flow updates. * We may get a very small possibility of OOO during switching, not * worth to optimize. */ if (!rcu_access_pointer(tun->steering_prog) && tun->numqueues > 1 && !tfile->detached) rxhash = __skb_get_hash_symmetric(skb); rcu_read_lock(); if (unlikely(!(tun->dev->flags & IFF_UP))) { err = -EIO; rcu_read_unlock(); drop_reason = SKB_DROP_REASON_DEV_READY; goto drop; } if (frags) { u32 headlen; /* Exercise flow dissector code path. 
*/ skb_push(skb, ETH_HLEN); headlen = eth_get_headlen(tun->dev, skb->data, skb_headlen(skb)); if (unlikely(headlen > skb_headlen(skb))) { WARN_ON_ONCE(1); err = -ENOMEM; dev_core_stats_rx_dropped_inc(tun->dev); napi_busy: napi_free_frags(&tfile->napi); rcu_read_unlock(); mutex_unlock(&tfile->napi_mutex); return err; } if (likely(napi_schedule_prep(&tfile->napi))) { local_bh_disable(); napi_gro_frags(&tfile->napi); napi_complete(&tfile->napi); local_bh_enable(); } else { err = -EBUSY; goto napi_busy; } mutex_unlock(&tfile->napi_mutex); } else if (tfile->napi_enabled) { struct sk_buff_head *queue = &tfile->sk.sk_write_queue; int queue_len; spin_lock_bh(&queue->lock); if (unlikely(tfile->detached)) { spin_unlock_bh(&queue->lock); rcu_read_unlock(); err = -EBUSY; goto free_skb; } __skb_queue_tail(queue, skb); queue_len = skb_queue_len(queue); spin_unlock(&queue->lock); if (!more || queue_len > NAPI_POLL_WEIGHT) napi_schedule(&tfile->napi); local_bh_enable(); } else if (!IS_ENABLED(CONFIG_4KSTACKS)) { tun_rx_batched(tun, tfile, skb, more); } else { netif_rx(skb); } rcu_read_unlock(); preempt_disable(); dev_sw_netstats_rx_add(tun->dev, len); preempt_enable(); if (rxhash) tun_flow_update(tun, rxhash, tfile); return total_len; drop: if (err != -EAGAIN) dev_core_stats_rx_dropped_inc(tun->dev); free_skb: if (!IS_ERR_OR_NULL(skb)) kfree_skb_reason(skb, drop_reason); unlock_frags: if (frags) { tfile->napi.skb = NULL; mutex_unlock(&tfile->napi_mutex); } return err ?: total_len; } static ssize_t tun_chr_write_iter(struct kiocb *iocb, struct iov_iter *from) { struct file *file = iocb->ki_filp; struct tun_file *tfile = file->private_data; struct tun_struct *tun = tun_get(tfile); ssize_t result; int noblock = 0; if (!tun) return -EBADFD; if ((file->f_flags & O_NONBLOCK) || (iocb->ki_flags & IOCB_NOWAIT)) noblock = 1; result = tun_get_user(tun, tfile, NULL, from, noblock, false); tun_put(tun); return result; } static ssize_t tun_put_user_xdp(struct tun_struct *tun, struct tun_file *tfile, struct xdp_frame *xdp_frame, struct iov_iter *iter) { int vnet_hdr_sz = 0; size_t size = xdp_frame->len; size_t ret; if (tun->flags & IFF_VNET_HDR) { struct virtio_net_hdr gso = { 0 }; vnet_hdr_sz = READ_ONCE(tun->vnet_hdr_sz); if (unlikely(iov_iter_count(iter) < vnet_hdr_sz)) return -EINVAL; if (unlikely(copy_to_iter(&gso, sizeof(gso), iter) != sizeof(gso))) return -EFAULT; iov_iter_advance(iter, vnet_hdr_sz - sizeof(gso)); } ret = copy_to_iter(xdp_frame->data, size, iter) + vnet_hdr_sz; preempt_disable(); dev_sw_netstats_tx_add(tun->dev, 1, ret); preempt_enable(); return ret; } /* Put packet to the user space buffer */ static ssize_t tun_put_user(struct tun_struct *tun, struct tun_file *tfile, struct sk_buff *skb, struct iov_iter *iter) { struct tun_pi pi = { 0, skb->protocol }; ssize_t total; int vlan_offset = 0; int vlan_hlen = 0; int vnet_hdr_sz = 0; if (skb_vlan_tag_present(skb)) vlan_hlen = VLAN_HLEN; if (tun->flags & IFF_VNET_HDR) vnet_hdr_sz = READ_ONCE(tun->vnet_hdr_sz); total = skb->len + vlan_hlen + vnet_hdr_sz; if (!(tun->flags & IFF_NO_PI)) { if (iov_iter_count(iter) < sizeof(pi)) return -EINVAL; total += sizeof(pi); if (iov_iter_count(iter) < total) { /* Packet will be striped */ pi.flags |= TUN_PKT_STRIP; } if (copy_to_iter(&pi, sizeof(pi), iter) != sizeof(pi)) return -EFAULT; } if (vnet_hdr_sz) { struct virtio_net_hdr gso; if (iov_iter_count(iter) < vnet_hdr_sz) return -EINVAL; if (virtio_net_hdr_from_skb(skb, &gso, tun_is_little_endian(tun), true, vlan_hlen)) { struct skb_shared_info *sinfo = 
skb_shinfo(skb); pr_err("unexpected GSO type: " "0x%x, gso_size %d, hdr_len %d\n", sinfo->gso_type, tun16_to_cpu(tun, gso.gso_size), tun16_to_cpu(tun, gso.hdr_len)); print_hex_dump(KERN_ERR, "tun: ", DUMP_PREFIX_NONE, 16, 1, skb->head, min((int)tun16_to_cpu(tun, gso.hdr_len), 64), true); WARN_ON_ONCE(1); return -EINVAL; } if (copy_to_iter(&gso, sizeof(gso), iter) != sizeof(gso)) return -EFAULT; iov_iter_advance(iter, vnet_hdr_sz - sizeof(gso)); } if (vlan_hlen) { int ret; struct veth veth; veth.h_vlan_proto = skb->vlan_proto; veth.h_vlan_TCI = htons(skb_vlan_tag_get(skb)); vlan_offset = offsetof(struct vlan_ethhdr, h_vlan_proto); ret = skb_copy_datagram_iter(skb, 0, iter, vlan_offset); if (ret || !iov_iter_count(iter)) goto done; ret = copy_to_iter(&veth, sizeof(veth), iter); if (ret != sizeof(veth) || !iov_iter_count(iter)) goto done; } skb_copy_datagram_iter(skb, vlan_offset, iter, skb->len - vlan_offset); done: /* caller is in process context, */ preempt_disable(); dev_sw_netstats_tx_add(tun->dev, 1, skb->len + vlan_hlen); preempt_enable(); return total; } static void *tun_ring_recv(struct tun_file *tfile, int noblock, int *err) { DECLARE_WAITQUEUE(wait, current); void *ptr = NULL; int error = 0; ptr = ptr_ring_consume(&tfile->tx_ring); if (ptr) goto out; if (noblock) { error = -EAGAIN; goto out; } add_wait_queue(&tfile->socket.wq.wait, &wait); while (1) { set_current_state(TASK_INTERRUPTIBLE); ptr = ptr_ring_consume(&tfile->tx_ring); if (ptr) break; if (signal_pending(current)) { error = -ERESTARTSYS; break; } if (tfile->socket.sk->sk_shutdown & RCV_SHUTDOWN) { error = -EFAULT; break; } schedule(); } __set_current_state(TASK_RUNNING); remove_wait_queue(&tfile->socket.wq.wait, &wait); out: *err = error; return ptr; } static ssize_t tun_do_read(struct tun_struct *tun, struct tun_file *tfile, struct iov_iter *to, int noblock, void *ptr) { ssize_t ret; int err; if (!iov_iter_count(to)) { tun_ptr_free(ptr); return 0; } if (!ptr) { /* Read frames from ring */ ptr = tun_ring_recv(tfile, noblock, &err); if (!ptr) return err; } if (tun_is_xdp_frame(ptr)) { struct xdp_frame *xdpf = tun_ptr_to_xdp(ptr); ret = tun_put_user_xdp(tun, tfile, xdpf, to); xdp_return_frame(xdpf); } else { struct sk_buff *skb = ptr; ret = tun_put_user(tun, tfile, skb, to); if (unlikely(ret < 0)) kfree_skb(skb); else consume_skb(skb); } return ret; } static ssize_t tun_chr_read_iter(struct kiocb *iocb, struct iov_iter *to) { struct file *file = iocb->ki_filp; struct tun_file *tfile = file->private_data; struct tun_struct *tun = tun_get(tfile); ssize_t len = iov_iter_count(to), ret; int noblock = 0; if (!tun) return -EBADFD; if ((file->f_flags & O_NONBLOCK) || (iocb->ki_flags & IOCB_NOWAIT)) noblock = 1; ret = tun_do_read(tun, tfile, to, noblock, NULL); ret = min_t(ssize_t, ret, len); if (ret > 0) iocb->ki_pos = ret; tun_put(tun); return ret; } static void tun_prog_free(struct rcu_head *rcu) { struct tun_prog *prog = container_of(rcu, struct tun_prog, rcu); bpf_prog_destroy(prog->prog); kfree(prog); } static int __tun_set_ebpf(struct tun_struct *tun, struct tun_prog __rcu **prog_p, struct bpf_prog *prog) { struct tun_prog *old, *new = NULL; if (prog) { new = kmalloc(sizeof(*new), GFP_KERNEL); if (!new) return -ENOMEM; new->prog = prog; } spin_lock_bh(&tun->lock); old = rcu_dereference_protected(*prog_p, lockdep_is_held(&tun->lock)); rcu_assign_pointer(*prog_p, new); spin_unlock_bh(&tun->lock); if (old) call_rcu(&old->rcu, tun_prog_free); return 0; } static void tun_free_netdev(struct net_device *dev) { struct tun_struct 
*tun = netdev_priv(dev); BUG_ON(!(list_empty(&tun->disabled))); free_percpu(dev->tstats); tun_flow_uninit(tun); security_tun_dev_free_security(tun->security); __tun_set_ebpf(tun, &tun->steering_prog, NULL); __tun_set_ebpf(tun, &tun->filter_prog, NULL); } static void tun_setup(struct net_device *dev) { struct tun_struct *tun = netdev_priv(dev); tun->owner = INVALID_UID; tun->group = INVALID_GID; tun_default_link_ksettings(dev, &tun->link_ksettings); dev->ethtool_ops = &tun_ethtool_ops; dev->needs_free_netdev = true; dev->priv_destructor = tun_free_netdev; /* We prefer our own queue length */ dev->tx_queue_len = TUN_READQ_SIZE; } /* Trivial set of netlink ops to allow deleting tun or tap * device with netlink. */ static int tun_validate(struct nlattr *tb[], struct nlattr *data[], struct netlink_ext_ack *extack) { NL_SET_ERR_MSG(extack, "tun/tap creation via rtnetlink is not supported."); return -EOPNOTSUPP; } static size_t tun_get_size(const struct net_device *dev) { BUILD_BUG_ON(sizeof(u32) != sizeof(uid_t)); BUILD_BUG_ON(sizeof(u32) != sizeof(gid_t)); return nla_total_size(sizeof(uid_t)) + /* OWNER */ nla_total_size(sizeof(gid_t)) + /* GROUP */ nla_total_size(sizeof(u8)) + /* TYPE */ nla_total_size(sizeof(u8)) + /* PI */ nla_total_size(sizeof(u8)) + /* VNET_HDR */ nla_total_size(sizeof(u8)) + /* PERSIST */ nla_total_size(sizeof(u8)) + /* MULTI_QUEUE */ nla_total_size(sizeof(u32)) + /* NUM_QUEUES */ nla_total_size(sizeof(u32)) + /* NUM_DISABLED_QUEUES */ 0; } static int tun_fill_info(struct sk_buff *skb, const struct net_device *dev) { struct tun_struct *tun = netdev_priv(dev); if (nla_put_u8(skb, IFLA_TUN_TYPE, tun->flags & TUN_TYPE_MASK)) goto nla_put_failure; if (uid_valid(tun->owner) && nla_put_u32(skb, IFLA_TUN_OWNER, from_kuid_munged(current_user_ns(), tun->owner))) goto nla_put_failure; if (gid_valid(tun->group) && nla_put_u32(skb, IFLA_TUN_GROUP, from_kgid_munged(current_user_ns(), tun->group))) goto nla_put_failure; if (nla_put_u8(skb, IFLA_TUN_PI, !(tun->flags & IFF_NO_PI))) goto nla_put_failure; if (nla_put_u8(skb, IFLA_TUN_VNET_HDR, !!(tun->flags & IFF_VNET_HDR))) goto nla_put_failure; if (nla_put_u8(skb, IFLA_TUN_PERSIST, !!(tun->flags & IFF_PERSIST))) goto nla_put_failure; if (nla_put_u8(skb, IFLA_TUN_MULTI_QUEUE, !!(tun->flags & IFF_MULTI_QUEUE))) goto nla_put_failure; if (tun->flags & IFF_MULTI_QUEUE) { if (nla_put_u32(skb, IFLA_TUN_NUM_QUEUES, tun->numqueues)) goto nla_put_failure; if (nla_put_u32(skb, IFLA_TUN_NUM_DISABLED_QUEUES, tun->numdisabled)) goto nla_put_failure; } return 0; nla_put_failure: return -EMSGSIZE; } static struct rtnl_link_ops tun_link_ops __read_mostly = { .kind = DRV_NAME, .priv_size = sizeof(struct tun_struct), .setup = tun_setup, .validate = tun_validate, .get_size = tun_get_size, .fill_info = tun_fill_info, }; static void tun_sock_write_space(struct sock *sk) { struct tun_file *tfile; wait_queue_head_t *wqueue; if (!sock_writeable(sk)) return; if (!test_and_clear_bit(SOCKWQ_ASYNC_NOSPACE, &sk->sk_socket->flags)) return; wqueue = sk_sleep(sk); if (wqueue && waitqueue_active(wqueue)) wake_up_interruptible_sync_poll(wqueue, EPOLLOUT | EPOLLWRNORM | EPOLLWRBAND); tfile = container_of(sk, struct tun_file, sk); kill_fasync(&tfile->fasync, SIGIO, POLL_OUT); } static void tun_put_page(struct tun_page *tpage) { if (tpage->page) __page_frag_cache_drain(tpage->page, tpage->count); } static int tun_xdp_one(struct tun_struct *tun, struct tun_file *tfile, struct xdp_buff *xdp, int *flush, struct tun_page *tpage) { unsigned int datasize = xdp->data_end - 
xdp->data; struct tun_xdp_hdr *hdr = xdp->data_hard_start; struct virtio_net_hdr *gso = &hdr->gso; struct bpf_prog *xdp_prog; struct sk_buff *skb = NULL; struct sk_buff_head *queue; u32 rxhash = 0, act; int buflen = hdr->buflen; int ret = 0; bool skb_xdp = false; struct page *page; xdp_prog = rcu_dereference(tun->xdp_prog); if (xdp_prog) { if (gso->gso_type) { skb_xdp = true; goto build; } xdp_init_buff(xdp, buflen, &tfile->xdp_rxq); xdp_set_data_meta_invalid(xdp); act = bpf_prog_run_xdp(xdp_prog, xdp); ret = tun_xdp_act(tun, xdp_prog, xdp, act); if (ret < 0) { put_page(virt_to_head_page(xdp->data)); return ret; } switch (ret) { case XDP_REDIRECT: *flush = true; fallthrough; case XDP_TX: return 0; case XDP_PASS: break; default: page = virt_to_head_page(xdp->data); if (tpage->page == page) { ++tpage->count; } else { tun_put_page(tpage); tpage->page = page; tpage->count = 1; } return 0; } } build: skb = build_skb(xdp->data_hard_start, buflen); if (!skb) { ret = -ENOMEM; goto out; } skb_reserve(skb, xdp->data - xdp->data_hard_start); skb_put(skb, xdp->data_end - xdp->data); if (virtio_net_hdr_to_skb(skb, gso, tun_is_little_endian(tun))) { atomic_long_inc(&tun->rx_frame_errors); kfree_skb(skb); ret = -EINVAL; goto out; } skb->protocol = eth_type_trans(skb, tun->dev); skb_reset_network_header(skb); skb_probe_transport_header(skb); skb_record_rx_queue(skb, tfile->queue_index); if (skb_xdp) { ret = do_xdp_generic(xdp_prog, skb); if (ret != XDP_PASS) { ret = 0; goto out; } } if (!rcu_dereference(tun->steering_prog) && tun->numqueues > 1 && !tfile->detached) rxhash = __skb_get_hash_symmetric(skb); if (tfile->napi_enabled) { queue = &tfile->sk.sk_write_queue; spin_lock(&queue->lock); if (unlikely(tfile->detached)) { spin_unlock(&queue->lock); kfree_skb(skb); return -EBUSY; } __skb_queue_tail(queue, skb); spin_unlock(&queue->lock); ret = 1; } else { netif_receive_skb(skb); ret = 0; } /* No need to disable preemption here since this function is * always called with bh disabled */ dev_sw_netstats_rx_add(tun->dev, datasize); if (rxhash) tun_flow_update(tun, rxhash, tfile); out: return ret; } static int tun_sendmsg(struct socket *sock, struct msghdr *m, size_t total_len) { int ret, i; struct tun_file *tfile = container_of(sock, struct tun_file, socket); struct tun_struct *tun = tun_get(tfile); struct tun_msg_ctl *ctl = m->msg_control; struct xdp_buff *xdp; if (!tun) return -EBADFD; if (m->msg_controllen == sizeof(struct tun_msg_ctl) && ctl && ctl->type == TUN_MSG_PTR) { struct tun_page tpage; int n = ctl->num; int flush = 0, queued = 0; memset(&tpage, 0, sizeof(tpage)); local_bh_disable(); rcu_read_lock(); for (i = 0; i < n; i++) { xdp = &((struct xdp_buff *)ctl->ptr)[i]; ret = tun_xdp_one(tun, tfile, xdp, &flush, &tpage); if (ret > 0) queued += ret; } if (flush) xdp_do_flush(); if (tfile->napi_enabled && queued > 0) napi_schedule(&tfile->napi); rcu_read_unlock(); local_bh_enable(); tun_put_page(&tpage); ret = total_len; goto out; } ret = tun_get_user(tun, tfile, ctl ? 
ctl->ptr : NULL, &m->msg_iter, m->msg_flags & MSG_DONTWAIT, m->msg_flags & MSG_MORE); out: tun_put(tun); return ret; } static int tun_recvmsg(struct socket *sock, struct msghdr *m, size_t total_len, int flags) { struct tun_file *tfile = container_of(sock, struct tun_file, socket); struct tun_struct *tun = tun_get(tfile); void *ptr = m->msg_control; int ret; if (!tun) { ret = -EBADFD; goto out_free; } if (flags & ~(MSG_DONTWAIT|MSG_TRUNC|MSG_ERRQUEUE)) { ret = -EINVAL; goto out_put_tun; } if (flags & MSG_ERRQUEUE) { ret = sock_recv_errqueue(sock->sk, m, total_len, SOL_PACKET, TUN_TX_TIMESTAMP); goto out; } ret = tun_do_read(tun, tfile, &m->msg_iter, flags & MSG_DONTWAIT, ptr); if (ret > (ssize_t)total_len) { m->msg_flags |= MSG_TRUNC; ret = flags & MSG_TRUNC ? ret : total_len; } out: tun_put(tun); return ret; out_put_tun: tun_put(tun); out_free: tun_ptr_free(ptr); return ret; } static int tun_ptr_peek_len(void *ptr) { if (likely(ptr)) { if (tun_is_xdp_frame(ptr)) { struct xdp_frame *xdpf = tun_ptr_to_xdp(ptr); return xdpf->len; } return __skb_array_len_with_tag(ptr); } else { return 0; } } static int tun_peek_len(struct socket *sock) { struct tun_file *tfile = container_of(sock, struct tun_file, socket); struct tun_struct *tun; int ret = 0; tun = tun_get(tfile); if (!tun) return 0; ret = PTR_RING_PEEK_CALL(&tfile->tx_ring, tun_ptr_peek_len); tun_put(tun); return ret; } /* Ops structure to mimic raw sockets with tun */ static const struct proto_ops tun_socket_ops = { .peek_len = tun_peek_len, .sendmsg = tun_sendmsg, .recvmsg = tun_recvmsg, }; static struct proto tun_proto = { .name = "tun", .owner = THIS_MODULE, .obj_size = sizeof(struct tun_file), }; static int tun_flags(struct tun_struct *tun) { return tun->flags & (TUN_FEATURES | IFF_PERSIST | IFF_TUN | IFF_TAP); } static ssize_t tun_flags_show(struct device *dev, struct device_attribute *attr, char *buf) { struct tun_struct *tun = netdev_priv(to_net_dev(dev)); return sysfs_emit(buf, "0x%x\n", tun_flags(tun)); } static ssize_t owner_show(struct device *dev, struct device_attribute *attr, char *buf) { struct tun_struct *tun = netdev_priv(to_net_dev(dev)); return uid_valid(tun->owner)? sysfs_emit(buf, "%u\n", from_kuid_munged(current_user_ns(), tun->owner)) : sysfs_emit(buf, "-1\n"); } static ssize_t group_show(struct device *dev, struct device_attribute *attr, char *buf) { struct tun_struct *tun = netdev_priv(to_net_dev(dev)); return gid_valid(tun->group) ? 
sysfs_emit(buf, "%u\n", from_kgid_munged(current_user_ns(), tun->group)) : sysfs_emit(buf, "-1\n"); } static DEVICE_ATTR_RO(tun_flags); static DEVICE_ATTR_RO(owner); static DEVICE_ATTR_RO(group); static struct attribute *tun_dev_attrs[] = { &dev_attr_tun_flags.attr, &dev_attr_owner.attr, &dev_attr_group.attr, NULL }; static const struct attribute_group tun_attr_group = { .attrs = tun_dev_attrs }; static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr) { struct tun_struct *tun; struct tun_file *tfile = file->private_data; struct net_device *dev; int err; if (tfile->detached) return -EINVAL; if ((ifr->ifr_flags & IFF_NAPI_FRAGS)) { if (!capable(CAP_NET_ADMIN)) return -EPERM; if (!(ifr->ifr_flags & IFF_NAPI) || (ifr->ifr_flags & TUN_TYPE_MASK) != IFF_TAP) return -EINVAL; } dev = __dev_get_by_name(net, ifr->ifr_name); if (dev) { if (ifr->ifr_flags & IFF_TUN_EXCL) return -EBUSY; if ((ifr->ifr_flags & IFF_TUN) && dev->netdev_ops == &tun_netdev_ops) tun = netdev_priv(dev); else if ((ifr->ifr_flags & IFF_TAP) && dev->netdev_ops == &tap_netdev_ops) tun = netdev_priv(dev); else return -EINVAL; if (!!(ifr->ifr_flags & IFF_MULTI_QUEUE) != !!(tun->flags & IFF_MULTI_QUEUE)) return -EINVAL; if (tun_not_capable(tun)) return -EPERM; err = security_tun_dev_open(tun->security); if (err < 0) return err; err = tun_attach(tun, file, ifr->ifr_flags & IFF_NOFILTER, ifr->ifr_flags & IFF_NAPI, ifr->ifr_flags & IFF_NAPI_FRAGS, true); if (err < 0) return err; if (tun->flags & IFF_MULTI_QUEUE && (tun->numqueues + tun->numdisabled > 1)) { /* One or more queue has already been attached, no need * to initialize the device again. */ netdev_state_change(dev); return 0; } tun->flags = (tun->flags & ~TUN_FEATURES) | (ifr->ifr_flags & TUN_FEATURES); netdev_state_change(dev); } else { char *name; unsigned long flags = 0; int queues = ifr->ifr_flags & IFF_MULTI_QUEUE ? MAX_TAP_QUEUES : 1; if (!ns_capable(net->user_ns, CAP_NET_ADMIN)) return -EPERM; err = security_tun_dev_create(); if (err < 0) return err; /* Set dev type */ if (ifr->ifr_flags & IFF_TUN) { /* TUN device */ flags |= IFF_TUN; name = "tun%d"; } else if (ifr->ifr_flags & IFF_TAP) { /* TAP device */ flags |= IFF_TAP; name = "tap%d"; } else return -EINVAL; if (*ifr->ifr_name) name = ifr->ifr_name; dev = alloc_netdev_mqs(sizeof(struct tun_struct), name, NET_NAME_UNKNOWN, tun_setup, queues, queues); if (!dev) return -ENOMEM; dev_net_set(dev, net); dev->rtnl_link_ops = &tun_link_ops; dev->ifindex = tfile->ifindex; dev->sysfs_groups[0] = &tun_attr_group; tun = netdev_priv(dev); tun->dev = dev; tun->flags = flags; tun->txflt.count = 0; tun->vnet_hdr_sz = sizeof(struct virtio_net_hdr); tun->align = NET_SKB_PAD; tun->filter_attached = false; tun->sndbuf = tfile->socket.sk->sk_sndbuf; tun->rx_batched = 0; RCU_INIT_POINTER(tun->steering_prog, NULL); tun->ifr = ifr; tun->file = file; tun_net_initialize(dev); err = register_netdevice(tun->dev); if (err < 0) { free_netdev(dev); return err; } /* free_netdev() won't check refcnt, to avoid race * with dev_put() we need publish tun after registration. */ rcu_assign_pointer(tfile->tun, tun); } if (ifr->ifr_flags & IFF_NO_CARRIER) netif_carrier_off(tun->dev); else netif_carrier_on(tun->dev); /* Make sure persistent devices do not get stuck in * xoff state. 
*/ if (netif_running(tun->dev)) netif_tx_wake_all_queues(tun->dev); strcpy(ifr->ifr_name, tun->dev->name); return 0; } static void tun_get_iff(struct tun_struct *tun, struct ifreq *ifr) { strcpy(ifr->ifr_name, tun->dev->name); ifr->ifr_flags = tun_flags(tun); } /* This is like a cut-down ethtool ops, except done via tun fd so no * privs required. */ static int set_offload(struct tun_struct *tun, unsigned long arg) { netdev_features_t features = 0; if (arg & TUN_F_CSUM) { features |= NETIF_F_HW_CSUM; arg &= ~TUN_F_CSUM; if (arg & (TUN_F_TSO4|TUN_F_TSO6)) { if (arg & TUN_F_TSO_ECN) { features |= NETIF_F_TSO_ECN; arg &= ~TUN_F_TSO_ECN; } if (arg & TUN_F_TSO4) features |= NETIF_F_TSO; if (arg & TUN_F_TSO6) features |= NETIF_F_TSO6; arg &= ~(TUN_F_TSO4|TUN_F_TSO6); } arg &= ~TUN_F_UFO; /* TODO: for now USO4 and USO6 should work simultaneously */ if (arg & TUN_F_USO4 && arg & TUN_F_USO6) { features |= NETIF_F_GSO_UDP_L4; arg &= ~(TUN_F_USO4 | TUN_F_USO6); } } /* This gives the user a way to test for new features in future by * trying to set them. */ if (arg) return -EINVAL; tun->set_features = features; tun->dev->wanted_features &= ~TUN_USER_FEATURES; tun->dev->wanted_features |= features; netdev_update_features(tun->dev); return 0; } static void tun_detach_filter(struct tun_struct *tun, int n) { int i; struct tun_file *tfile; for (i = 0; i < n; i++) { tfile = rtnl_dereference(tun->tfiles[i]); lock_sock(tfile->socket.sk); sk_detach_filter(tfile->socket.sk); release_sock(tfile->socket.sk); } tun->filter_attached = false; } static int tun_attach_filter(struct tun_struct *tun) { int i, ret = 0; struct tun_file *tfile; for (i = 0; i < tun->numqueues; i++) { tfile = rtnl_dereference(tun->tfiles[i]); lock_sock(tfile->socket.sk); ret = sk_attach_filter(&tun->fprog, tfile->socket.sk); release_sock(tfile->socket.sk); if (ret) { tun_detach_filter(tun, i); return ret; } } tun->filter_attached = true; return ret; } static void tun_set_sndbuf(struct tun_struct *tun) { struct tun_file *tfile; int i; for (i = 0; i < tun->numqueues; i++) { tfile = rtnl_dereference(tun->tfiles[i]); tfile->socket.sk->sk_sndbuf = tun->sndbuf; } } static int tun_set_queue(struct file *file, struct ifreq *ifr) { struct tun_file *tfile = file->private_data; struct tun_struct *tun; int ret = 0; rtnl_lock(); if (ifr->ifr_flags & IFF_ATTACH_QUEUE) { tun = tfile->detached; if (!tun) { ret = -EINVAL; goto unlock; } ret = security_tun_dev_attach_queue(tun->security); if (ret < 0) goto unlock; ret = tun_attach(tun, file, false, tun->flags & IFF_NAPI, tun->flags & IFF_NAPI_FRAGS, true); } else if (ifr->ifr_flags & IFF_DETACH_QUEUE) { tun = rtnl_dereference(tfile->tun); if (!tun || !(tun->flags & IFF_MULTI_QUEUE) || tfile->detached) ret = -EINVAL; else __tun_detach(tfile, false); } else ret = -EINVAL; if (ret >= 0) netdev_state_change(tun->dev); unlock: rtnl_unlock(); return ret; } static int tun_set_ebpf(struct tun_struct *tun, struct tun_prog __rcu **prog_p, void __user *data) { struct bpf_prog *prog; int fd; if (copy_from_user(&fd, data, sizeof(fd))) return -EFAULT; if (fd == -1) { prog = NULL; } else { prog = bpf_prog_get_type(fd, BPF_PROG_TYPE_SOCKET_FILTER); if (IS_ERR(prog)) return PTR_ERR(prog); } return __tun_set_ebpf(tun, prog_p, prog); } /* Return correct value for tun->dev->addr_len based on tun->dev->type. 
*/ static unsigned char tun_get_addr_len(unsigned short type) { switch (type) { case ARPHRD_IP6GRE: case ARPHRD_TUNNEL6: return sizeof(struct in6_addr); case ARPHRD_IPGRE: case ARPHRD_TUNNEL: case ARPHRD_SIT: return 4; case ARPHRD_ETHER: return ETH_ALEN; case ARPHRD_IEEE802154: case ARPHRD_IEEE802154_MONITOR: return IEEE802154_EXTENDED_ADDR_LEN; case ARPHRD_PHONET_PIPE: case ARPHRD_PPP: case ARPHRD_NONE: return 0; case ARPHRD_6LOWPAN: return EUI64_ADDR_LEN; case ARPHRD_FDDI: return FDDI_K_ALEN; case ARPHRD_HIPPI: return HIPPI_ALEN; case ARPHRD_IEEE802: return FC_ALEN; case ARPHRD_ROSE: return ROSE_ADDR_LEN; case ARPHRD_NETROM: return AX25_ADDR_LEN; case ARPHRD_LOCALTLK: return LTALK_ALEN; default: return 0; } } static long __tun_chr_ioctl(struct file *file, unsigned int cmd, unsigned long arg, int ifreq_len) { struct tun_file *tfile = file->private_data; struct net *net = sock_net(&tfile->sk); struct tun_struct *tun; void __user* argp = (void __user*)arg; unsigned int ifindex, carrier; struct ifreq ifr; kuid_t owner; kgid_t group; int sndbuf; int vnet_hdr_sz; int le; int ret; bool do_notify = false; if (cmd == TUNSETIFF || cmd == TUNSETQUEUE || (_IOC_TYPE(cmd) == SOCK_IOC_TYPE && cmd != SIOCGSKNS)) { if (copy_from_user(&ifr, argp, ifreq_len)) return -EFAULT; } else { memset(&ifr, 0, sizeof(ifr)); } if (cmd == TUNGETFEATURES) { /* Currently this just means: "what IFF flags are valid?". * This is needed because we never checked for invalid flags on * TUNSETIFF. */ return put_user(IFF_TUN | IFF_TAP | IFF_NO_CARRIER | TUN_FEATURES, (unsigned int __user*)argp); } else if (cmd == TUNSETQUEUE) { return tun_set_queue(file, &ifr); } else if (cmd == SIOCGSKNS) { if (!ns_capable(net->user_ns, CAP_NET_ADMIN)) return -EPERM; return open_related_ns(&net->ns, get_net_ns); } rtnl_lock(); tun = tun_get(tfile); if (cmd == TUNSETIFF) { ret = -EEXIST; if (tun) goto unlock; ifr.ifr_name[IFNAMSIZ-1] = '\0'; ret = tun_set_iff(net, file, &ifr); if (ret) goto unlock; if (copy_to_user(argp, &ifr, ifreq_len)) ret = -EFAULT; goto unlock; } if (cmd == TUNSETIFINDEX) { ret = -EPERM; if (tun) goto unlock; ret = -EFAULT; if (copy_from_user(&ifindex, argp, sizeof(ifindex))) goto unlock; ret = 0; tfile->ifindex = ifindex; goto unlock; } ret = -EBADFD; if (!tun) goto unlock; netif_info(tun, drv, tun->dev, "tun_chr_ioctl cmd %u\n", cmd); net = dev_net(tun->dev); ret = 0; switch (cmd) { case TUNGETIFF: tun_get_iff(tun, &ifr); if (tfile->detached) ifr.ifr_flags |= IFF_DETACH_QUEUE; if (!tfile->socket.sk->sk_filter) ifr.ifr_flags |= IFF_NOFILTER; if (copy_to_user(argp, &ifr, ifreq_len)) ret = -EFAULT; break; case TUNSETNOCSUM: /* Disable/Enable checksum */ /* [unimplemented] */ netif_info(tun, drv, tun->dev, "ignored: set checksum %s\n", arg ? "disabled" : "enabled"); break; case TUNSETPERSIST: /* Disable/Enable persist mode. Keep an extra reference to the * module to prevent the module being unprobed. */ if (arg && !(tun->flags & IFF_PERSIST)) { tun->flags |= IFF_PERSIST; __module_get(THIS_MODULE); do_notify = true; } if (!arg && (tun->flags & IFF_PERSIST)) { tun->flags &= ~IFF_PERSIST; module_put(THIS_MODULE); do_notify = true; } netif_info(tun, drv, tun->dev, "persist %s\n", arg ? 
"enabled" : "disabled"); break; case TUNSETOWNER: /* Set owner of the device */ owner = make_kuid(current_user_ns(), arg); if (!uid_valid(owner)) { ret = -EINVAL; break; } tun->owner = owner; do_notify = true; netif_info(tun, drv, tun->dev, "owner set to %u\n", from_kuid(&init_user_ns, tun->owner)); break; case TUNSETGROUP: /* Set group of the device */ group = make_kgid(current_user_ns(), arg); if (!gid_valid(group)) { ret = -EINVAL; break; } tun->group = group; do_notify = true; netif_info(tun, drv, tun->dev, "group set to %u\n", from_kgid(&init_user_ns, tun->group)); break; case TUNSETLINK: /* Only allow setting the type when the interface is down */ if (tun->dev->flags & IFF_UP) { netif_info(tun, drv, tun->dev, "Linktype set failed because interface is up\n"); ret = -EBUSY; } else { ret = call_netdevice_notifiers(NETDEV_PRE_TYPE_CHANGE, tun->dev); ret = notifier_to_errno(ret); if (ret) { netif_info(tun, drv, tun->dev, "Refused to change device type\n"); break; } tun->dev->type = (int) arg; tun->dev->addr_len = tun_get_addr_len(tun->dev->type); netif_info(tun, drv, tun->dev, "linktype set to %d\n", tun->dev->type); call_netdevice_notifiers(NETDEV_POST_TYPE_CHANGE, tun->dev); } break; case TUNSETDEBUG: tun->msg_enable = (u32)arg; break; case TUNSETOFFLOAD: ret = set_offload(tun, arg); break; case TUNSETTXFILTER: /* Can be set only for TAPs */ ret = -EINVAL; if ((tun->flags & TUN_TYPE_MASK) != IFF_TAP) break; ret = update_filter(&tun->txflt, (void __user *)arg); break; case SIOCGIFHWADDR: /* Get hw address */ dev_get_mac_address(&ifr.ifr_hwaddr, net, tun->dev->name); if (copy_to_user(argp, &ifr, ifreq_len)) ret = -EFAULT; break; case SIOCSIFHWADDR: /* Set hw address */ ret = dev_set_mac_address_user(tun->dev, &ifr.ifr_hwaddr, NULL); break; case TUNGETSNDBUF: sndbuf = tfile->socket.sk->sk_sndbuf; if (copy_to_user(argp, &sndbuf, sizeof(sndbuf))) ret = -EFAULT; break; case TUNSETSNDBUF: if (copy_from_user(&sndbuf, argp, sizeof(sndbuf))) { ret = -EFAULT; break; } if (sndbuf <= 0) { ret = -EINVAL; break; } tun->sndbuf = sndbuf; tun_set_sndbuf(tun); break; case TUNGETVNETHDRSZ: vnet_hdr_sz = tun->vnet_hdr_sz; if (copy_to_user(argp, &vnet_hdr_sz, sizeof(vnet_hdr_sz))) ret = -EFAULT; break; case TUNSETVNETHDRSZ: if (copy_from_user(&vnet_hdr_sz, argp, sizeof(vnet_hdr_sz))) { ret = -EFAULT; break; } if (vnet_hdr_sz < (int)sizeof(struct virtio_net_hdr)) { ret = -EINVAL; break; } tun->vnet_hdr_sz = vnet_hdr_sz; break; case TUNGETVNETLE: le = !!(tun->flags & TUN_VNET_LE); if (put_user(le, (int __user *)argp)) ret = -EFAULT; break; case TUNSETVNETLE: if (get_user(le, (int __user *)argp)) { ret = -EFAULT; break; } if (le) tun->flags |= TUN_VNET_LE; else tun->flags &= ~TUN_VNET_LE; break; case TUNGETVNETBE: ret = tun_get_vnet_be(tun, argp); break; case TUNSETVNETBE: ret = tun_set_vnet_be(tun, argp); break; case TUNATTACHFILTER: /* Can be set only for TAPs */ ret = -EINVAL; if ((tun->flags & TUN_TYPE_MASK) != IFF_TAP) break; ret = -EFAULT; if (copy_from_user(&tun->fprog, argp, sizeof(tun->fprog))) break; ret = tun_attach_filter(tun); break; case TUNDETACHFILTER: /* Can be set only for TAPs */ ret = -EINVAL; if ((tun->flags & TUN_TYPE_MASK) != IFF_TAP) break; ret = 0; tun_detach_filter(tun, tun->numqueues); break; case TUNGETFILTER: ret = -EINVAL; if ((tun->flags & TUN_TYPE_MASK) != IFF_TAP) break; ret = -EFAULT; if (copy_to_user(argp, &tun->fprog, sizeof(tun->fprog))) break; ret = 0; break; case TUNSETSTEERINGEBPF: ret = tun_set_ebpf(tun, &tun->steering_prog, argp); break; case TUNSETFILTEREBPF: ret = 
tun_set_ebpf(tun, &tun->filter_prog, argp); break; case TUNSETCARRIER: ret = -EFAULT; if (copy_from_user(&carrier, argp, sizeof(carrier))) goto unlock; ret = tun_net_change_carrier(tun->dev, (bool)carrier); break; case TUNGETDEVNETNS: ret = -EPERM; if (!ns_capable(net->user_ns, CAP_NET_ADMIN)) goto unlock; ret = open_related_ns(&net->ns, get_net_ns); break; default: ret = -EINVAL; break; } if (do_notify) netdev_state_change(tun->dev); unlock: rtnl_unlock(); if (tun) tun_put(tun); return ret; } static long tun_chr_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { return __tun_chr_ioctl(file, cmd, arg, sizeof (struct ifreq)); } #ifdef CONFIG_COMPAT static long tun_chr_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { switch (cmd) { case TUNSETIFF: case TUNGETIFF: case TUNSETTXFILTER: case TUNGETSNDBUF: case TUNSETSNDBUF: case SIOCGIFHWADDR: case SIOCSIFHWADDR: arg = (unsigned long)compat_ptr(arg); break; default: arg = (compat_ulong_t)arg; break; } /* * compat_ifreq is shorter than ifreq, so we must not access beyond * the end of that structure. All fields that are used in this * driver are compatible though, we don't need to convert the * contents. */ return __tun_chr_ioctl(file, cmd, arg, sizeof(struct compat_ifreq)); } #endif /* CONFIG_COMPAT */ static int tun_chr_fasync(int fd, struct file *file, int on) { struct tun_file *tfile = file->private_data; int ret; if ((ret = fasync_helper(fd, file, on, &tfile->fasync)) < 0) goto out; if (on) { __f_setown(file, task_pid(current), PIDTYPE_TGID, 0); tfile->flags |= TUN_FASYNC; } else tfile->flags &= ~TUN_FASYNC; ret = 0; out: return ret; } static int tun_chr_open(struct inode *inode, struct file * file) { struct net *net = current->nsproxy->net_ns; struct tun_file *tfile; tfile = (struct tun_file *)sk_alloc(net, AF_UNSPEC, GFP_KERNEL, &tun_proto, 0); if (!tfile) return -ENOMEM; if (ptr_ring_init(&tfile->tx_ring, 0, GFP_KERNEL)) { sk_free(&tfile->sk); return -ENOMEM; } mutex_init(&tfile->napi_mutex); RCU_INIT_POINTER(tfile->tun, NULL); tfile->flags = 0; tfile->ifindex = 0; init_waitqueue_head(&tfile->socket.wq.wait); tfile->socket.file = file; tfile->socket.ops = &tun_socket_ops; sock_init_data_uid(&tfile->socket, &tfile->sk, current_fsuid()); tfile->sk.sk_write_space = tun_sock_write_space; tfile->sk.sk_sndbuf = INT_MAX; file->private_data = tfile; INIT_LIST_HEAD(&tfile->next); sock_set_flag(&tfile->sk, SOCK_ZEROCOPY); /* tun groks IOCB_NOWAIT just fine, mark it as such */ file->f_mode |= FMODE_NOWAIT; return 0; } static int tun_chr_close(struct inode *inode, struct file *file) { struct tun_file *tfile = file->private_data; tun_detach(tfile, true); return 0; } #ifdef CONFIG_PROC_FS static void tun_chr_show_fdinfo(struct seq_file *m, struct file *file) { struct tun_file *tfile = file->private_data; struct tun_struct *tun; struct ifreq ifr; memset(&ifr, 0, sizeof(ifr)); rtnl_lock(); tun = tun_get(tfile); if (tun) tun_get_iff(tun, &ifr); rtnl_unlock(); if (tun) tun_put(tun); seq_printf(m, "iff:\t%s\n", ifr.ifr_name); } #endif static const struct file_operations tun_fops = { .owner = THIS_MODULE, .llseek = no_llseek, .read_iter = tun_chr_read_iter, .write_iter = tun_chr_write_iter, .poll = tun_chr_poll, .unlocked_ioctl = tun_chr_ioctl, #ifdef CONFIG_COMPAT .compat_ioctl = tun_chr_compat_ioctl, #endif .open = tun_chr_open, .release = tun_chr_close, .fasync = tun_chr_fasync, #ifdef CONFIG_PROC_FS .show_fdinfo = tun_chr_show_fdinfo, #endif }; static struct miscdevice tun_miscdev = { .minor = TUN_MINOR, .name = "tun", 
.nodename = "net/tun", .fops = &tun_fops, }; /* ethtool interface */ static void tun_default_link_ksettings(struct net_device *dev, struct ethtool_link_ksettings *cmd) { ethtool_link_ksettings_zero_link_mode(cmd, supported); ethtool_link_ksettings_zero_link_mode(cmd, advertising); cmd->base.speed = SPEED_10000; cmd->base.duplex = DUPLEX_FULL; cmd->base.port = PORT_TP; cmd->base.phy_address = 0; cmd->base.autoneg = AUTONEG_DISABLE; } static int tun_get_link_ksettings(struct net_device *dev, struct ethtool_link_ksettings *cmd) { struct tun_struct *tun = netdev_priv(dev); memcpy(cmd, &tun->link_ksettings, sizeof(*cmd)); return 0; } static int tun_set_link_ksettings(struct net_device *dev, const struct ethtool_link_ksettings *cmd) { struct tun_struct *tun = netdev_priv(dev); memcpy(&tun->link_ksettings, cmd, sizeof(*cmd)); return 0; } static void tun_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) { struct tun_struct *tun = netdev_priv(dev); strscpy(info->driver, DRV_NAME, sizeof(info->driver)); strscpy(info->version, DRV_VERSION, sizeof(info->version)); switch (tun->flags & TUN_TYPE_MASK) { case IFF_TUN: strscpy(info->bus_info, "tun", sizeof(info->bus_info)); break; case IFF_TAP: strscpy(info->bus_info, "tap", sizeof(info->bus_info)); break; } } static u32 tun_get_msglevel(struct net_device *dev) { struct tun_struct *tun = netdev_priv(dev); return tun->msg_enable; } static void tun_set_msglevel(struct net_device *dev, u32 value) { struct tun_struct *tun = netdev_priv(dev); tun->msg_enable = value; } static int tun_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec, struct kernel_ethtool_coalesce *kernel_coal, struct netlink_ext_ack *extack) { struct tun_struct *tun = netdev_priv(dev); ec->rx_max_coalesced_frames = tun->rx_batched; return 0; } static int tun_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec, struct kernel_ethtool_coalesce *kernel_coal, struct netlink_ext_ack *extack) { struct tun_struct *tun = netdev_priv(dev); if (ec->rx_max_coalesced_frames > NAPI_POLL_WEIGHT) tun->rx_batched = NAPI_POLL_WEIGHT; else tun->rx_batched = ec->rx_max_coalesced_frames; return 0; } static const struct ethtool_ops tun_ethtool_ops = { .supported_coalesce_params = ETHTOOL_COALESCE_RX_MAX_FRAMES, .get_drvinfo = tun_get_drvinfo, .get_msglevel = tun_get_msglevel, .set_msglevel = tun_set_msglevel, .get_link = ethtool_op_get_link, .get_ts_info = ethtool_op_get_ts_info, .get_coalesce = tun_get_coalesce, .set_coalesce = tun_set_coalesce, .get_link_ksettings = tun_get_link_ksettings, .set_link_ksettings = tun_set_link_ksettings, }; static int tun_queue_resize(struct tun_struct *tun) { struct net_device *dev = tun->dev; struct tun_file *tfile; struct ptr_ring **rings; int n = tun->numqueues + tun->numdisabled; int ret, i; rings = kmalloc_array(n, sizeof(*rings), GFP_KERNEL); if (!rings) return -ENOMEM; for (i = 0; i < tun->numqueues; i++) { tfile = rtnl_dereference(tun->tfiles[i]); rings[i] = &tfile->tx_ring; } list_for_each_entry(tfile, &tun->disabled, next) rings[i++] = &tfile->tx_ring; ret = ptr_ring_resize_multiple(rings, n, dev->tx_queue_len, GFP_KERNEL, tun_ptr_free); kfree(rings); return ret; } static int tun_device_event(struct notifier_block *unused, unsigned long event, void *ptr) { struct net_device *dev = netdev_notifier_info_to_dev(ptr); struct tun_struct *tun = netdev_priv(dev); int i; if (dev->rtnl_link_ops != &tun_link_ops) return NOTIFY_DONE; switch (event) { case NETDEV_CHANGE_TX_QUEUE_LEN: if (tun_queue_resize(tun)) return NOTIFY_BAD; break; 
case NETDEV_UP: for (i = 0; i < tun->numqueues; i++) { struct tun_file *tfile; tfile = rtnl_dereference(tun->tfiles[i]); tfile->socket.sk->sk_write_space(tfile->socket.sk); } break; default: break; } return NOTIFY_DONE; } static struct notifier_block tun_notifier_block __read_mostly = { .notifier_call = tun_device_event, }; static int __init tun_init(void) { int ret = 0; pr_info("%s, %s\n", DRV_DESCRIPTION, DRV_VERSION); ret = rtnl_link_register(&tun_link_ops); if (ret) { pr_err("Can't register link_ops\n"); goto err_linkops; } ret = misc_register(&tun_miscdev); if (ret) { pr_err("Can't register misc device %d\n", TUN_MINOR); goto err_misc; } ret = register_netdevice_notifier(&tun_notifier_block); if (ret) { pr_err("Can't register netdevice notifier\n"); goto err_notifier; } return 0; err_notifier: misc_deregister(&tun_miscdev); err_misc: rtnl_link_unregister(&tun_link_ops); err_linkops: return ret; } static void __exit tun_cleanup(void) { misc_deregister(&tun_miscdev); rtnl_link_unregister(&tun_link_ops); unregister_netdevice_notifier(&tun_notifier_block); } /* Get an underlying socket object from tun file. Returns error unless file is * attached to a device. The returned object works like a packet socket, it * can be used for sock_sendmsg/sock_recvmsg. The caller is responsible for * holding a reference to the file for as long as the socket is in use. */ struct socket *tun_get_socket(struct file *file) { struct tun_file *tfile; if (file->f_op != &tun_fops) return ERR_PTR(-EINVAL); tfile = file->private_data; if (!tfile) return ERR_PTR(-EBADFD); return &tfile->socket; } EXPORT_SYMBOL_GPL(tun_get_socket); struct ptr_ring *tun_get_tx_ring(struct file *file) { struct tun_file *tfile; if (file->f_op != &tun_fops) return ERR_PTR(-EINVAL); tfile = file->private_data; if (!tfile) return ERR_PTR(-EBADFD); return &tfile->tx_ring; } EXPORT_SYMBOL_GPL(tun_get_tx_ring); module_init(tun_init); module_exit(tun_cleanup); MODULE_DESCRIPTION(DRV_DESCRIPTION); MODULE_AUTHOR(DRV_COPYRIGHT); MODULE_LICENSE("GPL"); MODULE_ALIAS_MISCDEV(TUN_MINOR); MODULE_ALIAS("devname:net/tun");
linux-master
drivers/net/tun.c
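/* Editor's illustration (hedged sketch, not part of drivers/net/tun.c above):
 * the character-device half of the driver -- tun_chr_open(), the TUNSETIFF
 * ioctl handled by tun_set_iff(), and the read/write iterators -- is normally
 * driven from userspace through /dev/net/tun. The minimal program below
 * assumes CAP_NET_ADMIN, uses the arbitrary interface name "tun-demo0", and
 * skips most error handling. It attaches an IFF_TUN queue with IFF_NO_PI, so
 * each read() returns one bare IP packet queued by tun_net_xmit(). */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <net/if.h>
#include <linux/if_tun.h>

int main(void)
{
	struct ifreq ifr;
	unsigned char buf[2048];
	ssize_t n;
	int fd;

	fd = open("/dev/net/tun", O_RDWR);		/* tun_chr_open() */
	if (fd < 0) {
		perror("open(/dev/net/tun)");
		return 1;
	}

	memset(&ifr, 0, sizeof(ifr));
	/* IFF_NO_PI: no struct tun_pi prefix, reads/writes carry bare IP packets */
	ifr.ifr_flags = IFF_TUN | IFF_NO_PI;
	strncpy(ifr.ifr_name, "tun-demo0", IFNAMSIZ - 1);

	if (ioctl(fd, TUNSETIFF, &ifr) < 0) {		/* tun_set_iff() path */
		perror("ioctl(TUNSETIFF)");
		return 1;
	}
	printf("attached to %s\n", ifr.ifr_name);

	/* Bring the interface up and route traffic at it before expecting data;
	 * each successful read() then returns exactly one IP packet. */
	n = read(fd, buf, sizeof(buf));
	if (n > 0)
		printf("read %zd bytes, IP version %u\n", n, buf[0] >> 4);

	close(fd);
	return 0;
}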
// SPDX-License-Identifier: GPL-2.0-only
/* dummy.c: a dummy net driver

	The purpose of this driver is to provide a device to point
	a route through, but not to actually transmit packets.

	Why?  If you have a machine whose only connection is an occasional
	PPP/SLIP/PLIP link, you can only connect to your own hostname
	when the link is up.  Otherwise you have to use localhost.
	This isn't very consistent.

	One solution is to set up a dummy link using PPP/SLIP/PLIP,
	but this seems (to me) too much overhead for too little gain.
	This driver provides a small alternative.  Thus you can do

	[when not running slip]
		ifconfig dummy slip.addr.ess.here up
	[to go to slip]
		ifconfig dummy down
		dip whatever

	This was written by looking at Donald Becker's skeleton driver
	and the loopback driver.  I then threw away anything that didn't
	apply!  Thanks to Alan Cox for the key clue on what to do with
	misguided packets.

			Nick Holloway, 27th May 1994
	[I tweaked this explanation a little but that's all]
			Alan Cox, 30th May 1994
*/

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/init.h>
#include <linux/moduleparam.h>
#include <linux/rtnetlink.h>
#include <linux/net_tstamp.h>
#include <net/rtnetlink.h>
#include <linux/u64_stats_sync.h>

#define DRV_NAME	"dummy"

static int numdummies = 1;

/* fake multicast ability */
static void set_multicast_list(struct net_device *dev)
{
}

static void dummy_get_stats64(struct net_device *dev,
			      struct rtnl_link_stats64 *stats)
{
	dev_lstats_read(dev, &stats->tx_packets, &stats->tx_bytes);
}

static netdev_tx_t dummy_xmit(struct sk_buff *skb, struct net_device *dev)
{
	dev_lstats_add(dev, skb->len);

	skb_tx_timestamp(skb);
	dev_kfree_skb(skb);
	return NETDEV_TX_OK;
}

static int dummy_dev_init(struct net_device *dev)
{
	dev->lstats = netdev_alloc_pcpu_stats(struct pcpu_lstats);
	if (!dev->lstats)
		return -ENOMEM;

	return 0;
}

static void dummy_dev_uninit(struct net_device *dev)
{
	free_percpu(dev->lstats);
}

static int dummy_change_carrier(struct net_device *dev, bool new_carrier)
{
	if (new_carrier)
		netif_carrier_on(dev);
	else
		netif_carrier_off(dev);
	return 0;
}

static const struct net_device_ops dummy_netdev_ops = {
	.ndo_init		= dummy_dev_init,
	.ndo_uninit		= dummy_dev_uninit,
	.ndo_start_xmit		= dummy_xmit,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_rx_mode	= set_multicast_list,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_get_stats64	= dummy_get_stats64,
	.ndo_change_carrier	= dummy_change_carrier,
};

static const struct ethtool_ops dummy_ethtool_ops = {
	.get_ts_info		= ethtool_op_get_ts_info,
};

static void dummy_setup(struct net_device *dev)
{
	ether_setup(dev);

	/* Initialize the device structure. */
	dev->netdev_ops = &dummy_netdev_ops;
	dev->ethtool_ops = &dummy_ethtool_ops;
	dev->needs_free_netdev = true;

	/* Fill in device structure with ethernet-generic values. */
	dev->flags |= IFF_NOARP;
	dev->flags &= ~IFF_MULTICAST;
	dev->priv_flags |= IFF_LIVE_ADDR_CHANGE | IFF_NO_QUEUE;
	dev->features	|= NETIF_F_SG | NETIF_F_FRAGLIST;
	dev->features	|= NETIF_F_GSO_SOFTWARE;
	dev->features	|= NETIF_F_HW_CSUM | NETIF_F_HIGHDMA | NETIF_F_LLTX;
	dev->features	|= NETIF_F_GSO_ENCAP_ALL;
	dev->hw_features |= dev->features;
	dev->hw_enc_features |= dev->features;
	eth_hw_addr_random(dev);

	dev->min_mtu = 0;
	dev->max_mtu = 0;
}

static int dummy_validate(struct nlattr *tb[], struct nlattr *data[],
			  struct netlink_ext_ack *extack)
{
	if (tb[IFLA_ADDRESS]) {
		if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN)
			return -EINVAL;
		if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS])))
			return -EADDRNOTAVAIL;
	}
	return 0;
}

static struct rtnl_link_ops dummy_link_ops __read_mostly = {
	.kind		= DRV_NAME,
	.setup		= dummy_setup,
	.validate	= dummy_validate,
};

/* Number of dummy devices to be set up by this module. */
module_param(numdummies, int, 0);
MODULE_PARM_DESC(numdummies, "Number of dummy pseudo devices");

static int __init dummy_init_one(void)
{
	struct net_device *dev_dummy;
	int err;

	dev_dummy = alloc_netdev(0, "dummy%d", NET_NAME_ENUM, dummy_setup);
	if (!dev_dummy)
		return -ENOMEM;

	dev_dummy->rtnl_link_ops = &dummy_link_ops;
	err = register_netdevice(dev_dummy);
	if (err < 0)
		goto err;
	return 0;

err:
	free_netdev(dev_dummy);
	return err;
}

static int __init dummy_init_module(void)
{
	int i, err = 0;

	down_write(&pernet_ops_rwsem);
	rtnl_lock();
	err = __rtnl_link_register(&dummy_link_ops);
	if (err < 0)
		goto out;

	for (i = 0; i < numdummies && !err; i++) {
		err = dummy_init_one();
		cond_resched();
	}
	if (err < 0)
		__rtnl_link_unregister(&dummy_link_ops);

out:
	rtnl_unlock();
	up_write(&pernet_ops_rwsem);

	return err;
}

static void __exit dummy_cleanup_module(void)
{
	rtnl_link_unregister(&dummy_link_ops);
}

module_init(dummy_init_module);
module_exit(dummy_cleanup_module);
MODULE_LICENSE("GPL");
MODULE_ALIAS_RTNL_LINK(DRV_NAME);
linux-master
drivers/net/dummy.c
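dummy_init_module() registers the "dummy" rtnl link kind, so additional devices are normally created over rtnetlink (what `ip link add <name> type dummy` does). The sketch below (not part of the driver) hand-rolls that RTM_NEWLINK request in C, assuming CAP_NET_ADMIN is held; the attribute helper, the device name "dummy0" and the omitted ACK handling are illustrative.

/* Userspace sketch: create one link of kind "dummy" via NETLINK_ROUTE. */
#include <string.h>
#include <unistd.h>
#include <sys/socket.h>
#include <linux/netlink.h>
#include <linux/rtnetlink.h>
#include <linux/if_link.h>

/* Append one rtnetlink attribute to the message (no bounds checking). */
static void add_rtattr(struct nlmsghdr *nh, unsigned short type,
		       const void *data, unsigned short len)
{
	struct rtattr *rta = (struct rtattr *)((char *)nh + NLMSG_ALIGN(nh->nlmsg_len));

	rta->rta_type = type;
	rta->rta_len = RTA_LENGTH(len);
	if (len)
		memcpy(RTA_DATA(rta), data, len);
	nh->nlmsg_len = NLMSG_ALIGN(nh->nlmsg_len) + RTA_ALIGN(rta->rta_len);
}

int main(void)
{
	struct {
		struct nlmsghdr nh;
		struct ifinfomsg ifi;
		char buf[256];
	} req;
	struct rtattr *linkinfo;
	int fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_ROUTE);

	if (fd < 0)
		return 1;

	memset(&req, 0, sizeof(req));
	req.nh.nlmsg_len = NLMSG_LENGTH(sizeof(struct ifinfomsg));
	req.nh.nlmsg_type = RTM_NEWLINK;
	req.nh.nlmsg_flags = NLM_F_REQUEST | NLM_F_CREATE | NLM_F_EXCL | NLM_F_ACK;
	req.ifi.ifi_family = AF_UNSPEC;

	add_rtattr(&req.nh, IFLA_IFNAME, "dummy0", sizeof("dummy0"));

	/* IFLA_LINKINFO nests IFLA_INFO_KIND = "dummy", which selects the
	 * rtnl_link_ops registered by dummy_init_module(). */
	linkinfo = (struct rtattr *)((char *)&req.nh + NLMSG_ALIGN(req.nh.nlmsg_len));
	add_rtattr(&req.nh, IFLA_LINKINFO, NULL, 0);
	add_rtattr(&req.nh, IFLA_INFO_KIND, "dummy", strlen("dummy"));
	linkinfo->rta_len = (unsigned short)((char *)&req.nh + req.nh.nlmsg_len -
					     (char *)linkinfo);

	if (send(fd, &req, req.nh.nlmsg_len, 0) < 0) {	/* kernel answers with ACK/error */
		close(fd);
		return 1;
	}

	close(fd);
	return 0;
}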
// SPDX-License-Identifier: GPL-2.0-only /* * GENEVE: Generic Network Virtualization Encapsulation * * Copyright (c) 2015 Red Hat, Inc. */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/ethtool.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/etherdevice.h> #include <linux/hash.h> #include <net/ipv6_stubs.h> #include <net/dst_metadata.h> #include <net/gro_cells.h> #include <net/rtnetlink.h> #include <net/geneve.h> #include <net/gro.h> #include <net/protocol.h> #define GENEVE_NETDEV_VER "0.6" #define GENEVE_N_VID (1u << 24) #define GENEVE_VID_MASK (GENEVE_N_VID - 1) #define VNI_HASH_BITS 10 #define VNI_HASH_SIZE (1<<VNI_HASH_BITS) static bool log_ecn_error = true; module_param(log_ecn_error, bool, 0644); MODULE_PARM_DESC(log_ecn_error, "Log packets received with corrupted ECN"); #define GENEVE_VER 0 #define GENEVE_BASE_HLEN (sizeof(struct udphdr) + sizeof(struct genevehdr)) #define GENEVE_IPV4_HLEN (ETH_HLEN + sizeof(struct iphdr) + GENEVE_BASE_HLEN) #define GENEVE_IPV6_HLEN (ETH_HLEN + sizeof(struct ipv6hdr) + GENEVE_BASE_HLEN) /* per-network namespace private data for this module */ struct geneve_net { struct list_head geneve_list; struct list_head sock_list; }; static unsigned int geneve_net_id; struct geneve_dev_node { struct hlist_node hlist; struct geneve_dev *geneve; }; struct geneve_config { struct ip_tunnel_info info; bool collect_md; bool use_udp6_rx_checksums; bool ttl_inherit; enum ifla_geneve_df df; bool inner_proto_inherit; }; /* Pseudo network device */ struct geneve_dev { struct geneve_dev_node hlist4; /* vni hash table for IPv4 socket */ #if IS_ENABLED(CONFIG_IPV6) struct geneve_dev_node hlist6; /* vni hash table for IPv6 socket */ #endif struct net *net; /* netns for packet i/o */ struct net_device *dev; /* netdev for geneve tunnel */ struct geneve_sock __rcu *sock4; /* IPv4 socket used for geneve tunnel */ #if IS_ENABLED(CONFIG_IPV6) struct geneve_sock __rcu *sock6; /* IPv6 socket used for geneve tunnel */ #endif struct list_head next; /* geneve's per namespace list */ struct gro_cells gro_cells; struct geneve_config cfg; }; struct geneve_sock { bool collect_md; struct list_head list; struct socket *sock; struct rcu_head rcu; int refcnt; struct hlist_head vni_list[VNI_HASH_SIZE]; }; static inline __u32 geneve_net_vni_hash(u8 vni[3]) { __u32 vnid; vnid = (vni[0] << 16) | (vni[1] << 8) | vni[2]; return hash_32(vnid, VNI_HASH_BITS); } static __be64 vni_to_tunnel_id(const __u8 *vni) { #ifdef __BIG_ENDIAN return (vni[0] << 16) | (vni[1] << 8) | vni[2]; #else return (__force __be64)(((__force u64)vni[0] << 40) | ((__force u64)vni[1] << 48) | ((__force u64)vni[2] << 56)); #endif } /* Convert 64 bit tunnel ID to 24 bit VNI. 
*/ static void tunnel_id_to_vni(__be64 tun_id, __u8 *vni) { #ifdef __BIG_ENDIAN vni[0] = (__force __u8)(tun_id >> 16); vni[1] = (__force __u8)(tun_id >> 8); vni[2] = (__force __u8)tun_id; #else vni[0] = (__force __u8)((__force u64)tun_id >> 40); vni[1] = (__force __u8)((__force u64)tun_id >> 48); vni[2] = (__force __u8)((__force u64)tun_id >> 56); #endif } static bool eq_tun_id_and_vni(u8 *tun_id, u8 *vni) { return !memcmp(vni, &tun_id[5], 3); } static sa_family_t geneve_get_sk_family(struct geneve_sock *gs) { return gs->sock->sk->sk_family; } static struct geneve_dev *geneve_lookup(struct geneve_sock *gs, __be32 addr, u8 vni[]) { struct hlist_head *vni_list_head; struct geneve_dev_node *node; __u32 hash; /* Find the device for this VNI */ hash = geneve_net_vni_hash(vni); vni_list_head = &gs->vni_list[hash]; hlist_for_each_entry_rcu(node, vni_list_head, hlist) { if (eq_tun_id_and_vni((u8 *)&node->geneve->cfg.info.key.tun_id, vni) && addr == node->geneve->cfg.info.key.u.ipv4.dst) return node->geneve; } return NULL; } #if IS_ENABLED(CONFIG_IPV6) static struct geneve_dev *geneve6_lookup(struct geneve_sock *gs, struct in6_addr addr6, u8 vni[]) { struct hlist_head *vni_list_head; struct geneve_dev_node *node; __u32 hash; /* Find the device for this VNI */ hash = geneve_net_vni_hash(vni); vni_list_head = &gs->vni_list[hash]; hlist_for_each_entry_rcu(node, vni_list_head, hlist) { if (eq_tun_id_and_vni((u8 *)&node->geneve->cfg.info.key.tun_id, vni) && ipv6_addr_equal(&addr6, &node->geneve->cfg.info.key.u.ipv6.dst)) return node->geneve; } return NULL; } #endif static inline struct genevehdr *geneve_hdr(const struct sk_buff *skb) { return (struct genevehdr *)(udp_hdr(skb) + 1); } static struct geneve_dev *geneve_lookup_skb(struct geneve_sock *gs, struct sk_buff *skb) { static u8 zero_vni[3]; u8 *vni; if (geneve_get_sk_family(gs) == AF_INET) { struct iphdr *iph; __be32 addr; iph = ip_hdr(skb); /* outer IP header... */ if (gs->collect_md) { vni = zero_vni; addr = 0; } else { vni = geneve_hdr(skb)->vni; addr = iph->saddr; } return geneve_lookup(gs, addr, vni); #if IS_ENABLED(CONFIG_IPV6) } else if (geneve_get_sk_family(gs) == AF_INET6) { static struct in6_addr zero_addr6; struct ipv6hdr *ip6h; struct in6_addr addr6; ip6h = ipv6_hdr(skb); /* outer IPv6 header... */ if (gs->collect_md) { vni = zero_vni; addr6 = zero_addr6; } else { vni = geneve_hdr(skb)->vni; addr6 = ip6h->saddr; } return geneve6_lookup(gs, addr6, vni); #endif } return NULL; } /* geneve receive/decap routine */ static void geneve_rx(struct geneve_dev *geneve, struct geneve_sock *gs, struct sk_buff *skb) { struct genevehdr *gnvh = geneve_hdr(skb); struct metadata_dst *tun_dst = NULL; unsigned int len; int err = 0; void *oiph; if (ip_tunnel_collect_metadata() || gs->collect_md) { __be16 flags; flags = TUNNEL_KEY | (gnvh->oam ? TUNNEL_OAM : 0) | (gnvh->critical ? TUNNEL_CRIT_OPT : 0); tun_dst = udp_tun_rx_dst(skb, geneve_get_sk_family(gs), flags, vni_to_tunnel_id(gnvh->vni), gnvh->opt_len * 4); if (!tun_dst) { geneve->dev->stats.rx_dropped++; goto drop; } /* Update tunnel dst according to Geneve options. */ ip_tunnel_info_opts_set(&tun_dst->u.tun_info, gnvh->options, gnvh->opt_len * 4, TUNNEL_GENEVE_OPT); } else { /* Drop packets w/ critical options, * since we don't support any... 
*/ if (gnvh->critical) { geneve->dev->stats.rx_frame_errors++; geneve->dev->stats.rx_errors++; goto drop; } } if (tun_dst) skb_dst_set(skb, &tun_dst->dst); if (gnvh->proto_type == htons(ETH_P_TEB)) { skb_reset_mac_header(skb); skb->protocol = eth_type_trans(skb, geneve->dev); skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN); /* Ignore packet loops (and multicast echo) */ if (ether_addr_equal(eth_hdr(skb)->h_source, geneve->dev->dev_addr)) { geneve->dev->stats.rx_errors++; goto drop; } } else { skb_reset_mac_header(skb); skb->dev = geneve->dev; skb->pkt_type = PACKET_HOST; } oiph = skb_network_header(skb); skb_reset_network_header(skb); if (geneve_get_sk_family(gs) == AF_INET) err = IP_ECN_decapsulate(oiph, skb); #if IS_ENABLED(CONFIG_IPV6) else err = IP6_ECN_decapsulate(oiph, skb); #endif if (unlikely(err)) { if (log_ecn_error) { if (geneve_get_sk_family(gs) == AF_INET) net_info_ratelimited("non-ECT from %pI4 " "with TOS=%#x\n", &((struct iphdr *)oiph)->saddr, ((struct iphdr *)oiph)->tos); #if IS_ENABLED(CONFIG_IPV6) else net_info_ratelimited("non-ECT from %pI6\n", &((struct ipv6hdr *)oiph)->saddr); #endif } if (err > 1) { ++geneve->dev->stats.rx_frame_errors; ++geneve->dev->stats.rx_errors; goto drop; } } len = skb->len; err = gro_cells_receive(&geneve->gro_cells, skb); if (likely(err == NET_RX_SUCCESS)) dev_sw_netstats_rx_add(geneve->dev, len); return; drop: /* Consume bad packet */ kfree_skb(skb); } /* Setup stats when device is created */ static int geneve_init(struct net_device *dev) { struct geneve_dev *geneve = netdev_priv(dev); int err; dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats); if (!dev->tstats) return -ENOMEM; err = gro_cells_init(&geneve->gro_cells, dev); if (err) { free_percpu(dev->tstats); return err; } err = dst_cache_init(&geneve->cfg.info.dst_cache, GFP_KERNEL); if (err) { free_percpu(dev->tstats); gro_cells_destroy(&geneve->gro_cells); return err; } return 0; } static void geneve_uninit(struct net_device *dev) { struct geneve_dev *geneve = netdev_priv(dev); dst_cache_destroy(&geneve->cfg.info.dst_cache); gro_cells_destroy(&geneve->gro_cells); free_percpu(dev->tstats); } /* Callback from net/ipv4/udp.c to receive packets */ static int geneve_udp_encap_recv(struct sock *sk, struct sk_buff *skb) { struct genevehdr *geneveh; struct geneve_dev *geneve; struct geneve_sock *gs; __be16 inner_proto; int opts_len; /* Need UDP and Geneve header to be present */ if (unlikely(!pskb_may_pull(skb, GENEVE_BASE_HLEN))) goto drop; /* Return packets with reserved bits set */ geneveh = geneve_hdr(skb); if (unlikely(geneveh->ver != GENEVE_VER)) goto drop; gs = rcu_dereference_sk_user_data(sk); if (!gs) goto drop; geneve = geneve_lookup_skb(gs, skb); if (!geneve) goto drop; inner_proto = geneveh->proto_type; if (unlikely((!geneve->cfg.inner_proto_inherit && inner_proto != htons(ETH_P_TEB)))) { geneve->dev->stats.rx_dropped++; goto drop; } opts_len = geneveh->opt_len * 4; if (iptunnel_pull_header(skb, GENEVE_BASE_HLEN + opts_len, inner_proto, !net_eq(geneve->net, dev_net(geneve->dev)))) { geneve->dev->stats.rx_dropped++; goto drop; } geneve_rx(geneve, gs, skb); return 0; drop: /* Consume bad packet */ kfree_skb(skb); return 0; } /* Callback from net/ipv{4,6}/udp.c to check that we have a tunnel for errors */ static int geneve_udp_encap_err_lookup(struct sock *sk, struct sk_buff *skb) { struct genevehdr *geneveh; struct geneve_sock *gs; u8 zero_vni[3] = { 0 }; u8 *vni = zero_vni; if (!pskb_may_pull(skb, skb_transport_offset(skb) + GENEVE_BASE_HLEN)) return -EINVAL; geneveh = 
geneve_hdr(skb); if (geneveh->ver != GENEVE_VER) return -EINVAL; if (geneveh->proto_type != htons(ETH_P_TEB)) return -EINVAL; gs = rcu_dereference_sk_user_data(sk); if (!gs) return -ENOENT; if (geneve_get_sk_family(gs) == AF_INET) { struct iphdr *iph = ip_hdr(skb); __be32 addr4 = 0; if (!gs->collect_md) { vni = geneve_hdr(skb)->vni; addr4 = iph->daddr; } return geneve_lookup(gs, addr4, vni) ? 0 : -ENOENT; } #if IS_ENABLED(CONFIG_IPV6) if (geneve_get_sk_family(gs) == AF_INET6) { struct ipv6hdr *ip6h = ipv6_hdr(skb); struct in6_addr addr6; memset(&addr6, 0, sizeof(struct in6_addr)); if (!gs->collect_md) { vni = geneve_hdr(skb)->vni; addr6 = ip6h->daddr; } return geneve6_lookup(gs, addr6, vni) ? 0 : -ENOENT; } #endif return -EPFNOSUPPORT; } static struct socket *geneve_create_sock(struct net *net, bool ipv6, __be16 port, bool ipv6_rx_csum) { struct socket *sock; struct udp_port_cfg udp_conf; int err; memset(&udp_conf, 0, sizeof(udp_conf)); if (ipv6) { udp_conf.family = AF_INET6; udp_conf.ipv6_v6only = 1; udp_conf.use_udp6_rx_checksums = ipv6_rx_csum; } else { udp_conf.family = AF_INET; udp_conf.local_ip.s_addr = htonl(INADDR_ANY); } udp_conf.local_udp_port = port; /* Open UDP socket */ err = udp_sock_create(net, &udp_conf, &sock); if (err < 0) return ERR_PTR(err); udp_allow_gso(sock->sk); return sock; } static int geneve_hlen(struct genevehdr *gh) { return sizeof(*gh) + gh->opt_len * 4; } static struct sk_buff *geneve_gro_receive(struct sock *sk, struct list_head *head, struct sk_buff *skb) { struct sk_buff *pp = NULL; struct sk_buff *p; struct genevehdr *gh, *gh2; unsigned int hlen, gh_len, off_gnv; const struct packet_offload *ptype; __be16 type; int flush = 1; off_gnv = skb_gro_offset(skb); hlen = off_gnv + sizeof(*gh); gh = skb_gro_header(skb, hlen, off_gnv); if (unlikely(!gh)) goto out; if (gh->ver != GENEVE_VER || gh->oam) goto out; gh_len = geneve_hlen(gh); hlen = off_gnv + gh_len; if (skb_gro_header_hard(skb, hlen)) { gh = skb_gro_header_slow(skb, hlen, off_gnv); if (unlikely(!gh)) goto out; } list_for_each_entry(p, head, list) { if (!NAPI_GRO_CB(p)->same_flow) continue; gh2 = (struct genevehdr *)(p->data + off_gnv); if (gh->opt_len != gh2->opt_len || memcmp(gh, gh2, gh_len)) { NAPI_GRO_CB(p)->same_flow = 0; continue; } } skb_gro_pull(skb, gh_len); skb_gro_postpull_rcsum(skb, gh, gh_len); type = gh->proto_type; if (likely(type == htons(ETH_P_TEB))) return call_gro_receive(eth_gro_receive, head, skb); ptype = gro_find_receive_by_type(type); if (!ptype) goto out; pp = call_gro_receive(ptype->callbacks.gro_receive, head, skb); flush = 0; out: skb_gro_flush_final(skb, pp, flush); return pp; } static int geneve_gro_complete(struct sock *sk, struct sk_buff *skb, int nhoff) { struct genevehdr *gh; struct packet_offload *ptype; __be16 type; int gh_len; int err = -ENOSYS; gh = (struct genevehdr *)(skb->data + nhoff); gh_len = geneve_hlen(gh); type = gh->proto_type; /* since skb->encapsulation is set, eth_gro_complete() sets the inner mac header */ if (likely(type == htons(ETH_P_TEB))) return eth_gro_complete(skb, nhoff + gh_len); ptype = gro_find_complete_by_type(type); if (ptype) err = ptype->callbacks.gro_complete(skb, nhoff + gh_len); skb_set_inner_mac_header(skb, nhoff + gh_len); return err; } /* Create new listen socket if needed */ static struct geneve_sock *geneve_socket_create(struct net *net, __be16 port, bool ipv6, bool ipv6_rx_csum) { struct geneve_net *gn = net_generic(net, geneve_net_id); struct geneve_sock *gs; struct socket *sock; struct udp_tunnel_sock_cfg tunnel_cfg; int h; gs 
= kzalloc(sizeof(*gs), GFP_KERNEL); if (!gs) return ERR_PTR(-ENOMEM); sock = geneve_create_sock(net, ipv6, port, ipv6_rx_csum); if (IS_ERR(sock)) { kfree(gs); return ERR_CAST(sock); } gs->sock = sock; gs->refcnt = 1; for (h = 0; h < VNI_HASH_SIZE; ++h) INIT_HLIST_HEAD(&gs->vni_list[h]); /* Initialize the geneve udp offloads structure */ udp_tunnel_notify_add_rx_port(gs->sock, UDP_TUNNEL_TYPE_GENEVE); /* Mark socket as an encapsulation socket */ memset(&tunnel_cfg, 0, sizeof(tunnel_cfg)); tunnel_cfg.sk_user_data = gs; tunnel_cfg.encap_type = 1; tunnel_cfg.gro_receive = geneve_gro_receive; tunnel_cfg.gro_complete = geneve_gro_complete; tunnel_cfg.encap_rcv = geneve_udp_encap_recv; tunnel_cfg.encap_err_lookup = geneve_udp_encap_err_lookup; tunnel_cfg.encap_destroy = NULL; setup_udp_tunnel_sock(net, sock, &tunnel_cfg); list_add(&gs->list, &gn->sock_list); return gs; } static void __geneve_sock_release(struct geneve_sock *gs) { if (!gs || --gs->refcnt) return; list_del(&gs->list); udp_tunnel_notify_del_rx_port(gs->sock, UDP_TUNNEL_TYPE_GENEVE); udp_tunnel_sock_release(gs->sock); kfree_rcu(gs, rcu); } static void geneve_sock_release(struct geneve_dev *geneve) { struct geneve_sock *gs4 = rtnl_dereference(geneve->sock4); #if IS_ENABLED(CONFIG_IPV6) struct geneve_sock *gs6 = rtnl_dereference(geneve->sock6); rcu_assign_pointer(geneve->sock6, NULL); #endif rcu_assign_pointer(geneve->sock4, NULL); synchronize_net(); __geneve_sock_release(gs4); #if IS_ENABLED(CONFIG_IPV6) __geneve_sock_release(gs6); #endif } static struct geneve_sock *geneve_find_sock(struct geneve_net *gn, sa_family_t family, __be16 dst_port) { struct geneve_sock *gs; list_for_each_entry(gs, &gn->sock_list, list) { if (inet_sk(gs->sock->sk)->inet_sport == dst_port && geneve_get_sk_family(gs) == family) { return gs; } } return NULL; } static int geneve_sock_add(struct geneve_dev *geneve, bool ipv6) { struct net *net = geneve->net; struct geneve_net *gn = net_generic(net, geneve_net_id); struct geneve_dev_node *node; struct geneve_sock *gs; __u8 vni[3]; __u32 hash; gs = geneve_find_sock(gn, ipv6 ? 
AF_INET6 : AF_INET, geneve->cfg.info.key.tp_dst); if (gs) { gs->refcnt++; goto out; } gs = geneve_socket_create(net, geneve->cfg.info.key.tp_dst, ipv6, geneve->cfg.use_udp6_rx_checksums); if (IS_ERR(gs)) return PTR_ERR(gs); out: gs->collect_md = geneve->cfg.collect_md; #if IS_ENABLED(CONFIG_IPV6) if (ipv6) { rcu_assign_pointer(geneve->sock6, gs); node = &geneve->hlist6; } else #endif { rcu_assign_pointer(geneve->sock4, gs); node = &geneve->hlist4; } node->geneve = geneve; tunnel_id_to_vni(geneve->cfg.info.key.tun_id, vni); hash = geneve_net_vni_hash(vni); hlist_add_head_rcu(&node->hlist, &gs->vni_list[hash]); return 0; } static int geneve_open(struct net_device *dev) { struct geneve_dev *geneve = netdev_priv(dev); bool metadata = geneve->cfg.collect_md; bool ipv4, ipv6; int ret = 0; ipv6 = geneve->cfg.info.mode & IP_TUNNEL_INFO_IPV6 || metadata; ipv4 = !ipv6 || metadata; #if IS_ENABLED(CONFIG_IPV6) if (ipv6) { ret = geneve_sock_add(geneve, true); if (ret < 0 && ret != -EAFNOSUPPORT) ipv4 = false; } #endif if (ipv4) ret = geneve_sock_add(geneve, false); if (ret < 0) geneve_sock_release(geneve); return ret; } static int geneve_stop(struct net_device *dev) { struct geneve_dev *geneve = netdev_priv(dev); hlist_del_init_rcu(&geneve->hlist4.hlist); #if IS_ENABLED(CONFIG_IPV6) hlist_del_init_rcu(&geneve->hlist6.hlist); #endif geneve_sock_release(geneve); return 0; } static void geneve_build_header(struct genevehdr *geneveh, const struct ip_tunnel_info *info, __be16 inner_proto) { geneveh->ver = GENEVE_VER; geneveh->opt_len = info->options_len / 4; geneveh->oam = !!(info->key.tun_flags & TUNNEL_OAM); geneveh->critical = !!(info->key.tun_flags & TUNNEL_CRIT_OPT); geneveh->rsvd1 = 0; tunnel_id_to_vni(info->key.tun_id, geneveh->vni); geneveh->proto_type = inner_proto; geneveh->rsvd2 = 0; if (info->key.tun_flags & TUNNEL_GENEVE_OPT) ip_tunnel_info_opts_get(geneveh->options, info); } static int geneve_build_skb(struct dst_entry *dst, struct sk_buff *skb, const struct ip_tunnel_info *info, bool xnet, int ip_hdr_len, bool inner_proto_inherit) { bool udp_sum = !!(info->key.tun_flags & TUNNEL_CSUM); struct genevehdr *gnvh; __be16 inner_proto; int min_headroom; int err; skb_reset_mac_header(skb); skb_scrub_packet(skb, xnet); min_headroom = LL_RESERVED_SPACE(dst->dev) + dst->header_len + GENEVE_BASE_HLEN + info->options_len + ip_hdr_len; err = skb_cow_head(skb, min_headroom); if (unlikely(err)) goto free_dst; err = udp_tunnel_handle_offloads(skb, udp_sum); if (err) goto free_dst; gnvh = __skb_push(skb, sizeof(*gnvh) + info->options_len); inner_proto = inner_proto_inherit ? 
skb->protocol : htons(ETH_P_TEB); geneve_build_header(gnvh, info, inner_proto); skb_set_inner_protocol(skb, inner_proto); return 0; free_dst: dst_release(dst); return err; } static struct rtable *geneve_get_v4_rt(struct sk_buff *skb, struct net_device *dev, struct geneve_sock *gs4, struct flowi4 *fl4, const struct ip_tunnel_info *info, __be16 dport, __be16 sport, __u8 *full_tos) { bool use_cache = ip_tunnel_dst_cache_usable(skb, info); struct geneve_dev *geneve = netdev_priv(dev); struct dst_cache *dst_cache; struct rtable *rt = NULL; __u8 tos; if (!gs4) return ERR_PTR(-EIO); memset(fl4, 0, sizeof(*fl4)); fl4->flowi4_mark = skb->mark; fl4->flowi4_proto = IPPROTO_UDP; fl4->daddr = info->key.u.ipv4.dst; fl4->saddr = info->key.u.ipv4.src; fl4->fl4_dport = dport; fl4->fl4_sport = sport; fl4->flowi4_flags = info->key.flow_flags; tos = info->key.tos; if ((tos == 1) && !geneve->cfg.collect_md) { tos = ip_tunnel_get_dsfield(ip_hdr(skb), skb); use_cache = false; } fl4->flowi4_tos = RT_TOS(tos); if (full_tos) *full_tos = tos; dst_cache = (struct dst_cache *)&info->dst_cache; if (use_cache) { rt = dst_cache_get_ip4(dst_cache, &fl4->saddr); if (rt) return rt; } rt = ip_route_output_key(geneve->net, fl4); if (IS_ERR(rt)) { netdev_dbg(dev, "no route to %pI4\n", &fl4->daddr); return ERR_PTR(-ENETUNREACH); } if (rt->dst.dev == dev) { /* is this necessary? */ netdev_dbg(dev, "circular route to %pI4\n", &fl4->daddr); ip_rt_put(rt); return ERR_PTR(-ELOOP); } if (use_cache) dst_cache_set_ip4(dst_cache, &rt->dst, fl4->saddr); return rt; } #if IS_ENABLED(CONFIG_IPV6) static struct dst_entry *geneve_get_v6_dst(struct sk_buff *skb, struct net_device *dev, struct geneve_sock *gs6, struct flowi6 *fl6, const struct ip_tunnel_info *info, __be16 dport, __be16 sport) { bool use_cache = ip_tunnel_dst_cache_usable(skb, info); struct geneve_dev *geneve = netdev_priv(dev); struct dst_entry *dst = NULL; struct dst_cache *dst_cache; __u8 prio; if (!gs6) return ERR_PTR(-EIO); memset(fl6, 0, sizeof(*fl6)); fl6->flowi6_mark = skb->mark; fl6->flowi6_proto = IPPROTO_UDP; fl6->daddr = info->key.u.ipv6.dst; fl6->saddr = info->key.u.ipv6.src; fl6->fl6_dport = dport; fl6->fl6_sport = sport; prio = info->key.tos; if ((prio == 1) && !geneve->cfg.collect_md) { prio = ip_tunnel_get_dsfield(ip_hdr(skb), skb); use_cache = false; } fl6->flowlabel = ip6_make_flowinfo(prio, info->key.label); dst_cache = (struct dst_cache *)&info->dst_cache; if (use_cache) { dst = dst_cache_get_ip6(dst_cache, &fl6->saddr); if (dst) return dst; } dst = ipv6_stub->ipv6_dst_lookup_flow(geneve->net, gs6->sock->sk, fl6, NULL); if (IS_ERR(dst)) { netdev_dbg(dev, "no route to %pI6\n", &fl6->daddr); return ERR_PTR(-ENETUNREACH); } if (dst->dev == dev) { /* is this necessary? 
*/ netdev_dbg(dev, "circular route to %pI6\n", &fl6->daddr); dst_release(dst); return ERR_PTR(-ELOOP); } if (use_cache) dst_cache_set_ip6(dst_cache, dst, &fl6->saddr); return dst; } #endif static int geneve_xmit_skb(struct sk_buff *skb, struct net_device *dev, struct geneve_dev *geneve, const struct ip_tunnel_info *info) { bool xnet = !net_eq(geneve->net, dev_net(geneve->dev)); struct geneve_sock *gs4 = rcu_dereference(geneve->sock4); const struct ip_tunnel_key *key = &info->key; struct rtable *rt; struct flowi4 fl4; __u8 full_tos; __u8 tos, ttl; __be16 df = 0; __be16 sport; int err; if (!pskb_inet_may_pull(skb)) return -EINVAL; sport = udp_flow_src_port(geneve->net, skb, 1, USHRT_MAX, true); rt = geneve_get_v4_rt(skb, dev, gs4, &fl4, info, geneve->cfg.info.key.tp_dst, sport, &full_tos); if (IS_ERR(rt)) return PTR_ERR(rt); err = skb_tunnel_check_pmtu(skb, &rt->dst, GENEVE_IPV4_HLEN + info->options_len, netif_is_any_bridge_port(dev)); if (err < 0) { dst_release(&rt->dst); return err; } else if (err) { struct ip_tunnel_info *info; info = skb_tunnel_info(skb); if (info) { struct ip_tunnel_info *unclone; unclone = skb_tunnel_info_unclone(skb); if (unlikely(!unclone)) { dst_release(&rt->dst); return -ENOMEM; } unclone->key.u.ipv4.dst = fl4.saddr; unclone->key.u.ipv4.src = fl4.daddr; } if (!pskb_may_pull(skb, ETH_HLEN)) { dst_release(&rt->dst); return -EINVAL; } skb->protocol = eth_type_trans(skb, geneve->dev); __netif_rx(skb); dst_release(&rt->dst); return -EMSGSIZE; } if (geneve->cfg.collect_md) { tos = ip_tunnel_ecn_encap(key->tos, ip_hdr(skb), skb); ttl = key->ttl; df = key->tun_flags & TUNNEL_DONT_FRAGMENT ? htons(IP_DF) : 0; } else { tos = ip_tunnel_ecn_encap(full_tos, ip_hdr(skb), skb); if (geneve->cfg.ttl_inherit) ttl = ip_tunnel_get_ttl(ip_hdr(skb), skb); else ttl = key->ttl; ttl = ttl ? 
: ip4_dst_hoplimit(&rt->dst); if (geneve->cfg.df == GENEVE_DF_SET) { df = htons(IP_DF); } else if (geneve->cfg.df == GENEVE_DF_INHERIT) { struct ethhdr *eth = eth_hdr(skb); if (ntohs(eth->h_proto) == ETH_P_IPV6) { df = htons(IP_DF); } else if (ntohs(eth->h_proto) == ETH_P_IP) { struct iphdr *iph = ip_hdr(skb); if (iph->frag_off & htons(IP_DF)) df = htons(IP_DF); } } } err = geneve_build_skb(&rt->dst, skb, info, xnet, sizeof(struct iphdr), geneve->cfg.inner_proto_inherit); if (unlikely(err)) return err; udp_tunnel_xmit_skb(rt, gs4->sock->sk, skb, fl4.saddr, fl4.daddr, tos, ttl, df, sport, geneve->cfg.info.key.tp_dst, !net_eq(geneve->net, dev_net(geneve->dev)), !(info->key.tun_flags & TUNNEL_CSUM)); return 0; } #if IS_ENABLED(CONFIG_IPV6) static int geneve6_xmit_skb(struct sk_buff *skb, struct net_device *dev, struct geneve_dev *geneve, const struct ip_tunnel_info *info) { bool xnet = !net_eq(geneve->net, dev_net(geneve->dev)); struct geneve_sock *gs6 = rcu_dereference(geneve->sock6); const struct ip_tunnel_key *key = &info->key; struct dst_entry *dst = NULL; struct flowi6 fl6; __u8 prio, ttl; __be16 sport; int err; if (!pskb_inet_may_pull(skb)) return -EINVAL; sport = udp_flow_src_port(geneve->net, skb, 1, USHRT_MAX, true); dst = geneve_get_v6_dst(skb, dev, gs6, &fl6, info, geneve->cfg.info.key.tp_dst, sport); if (IS_ERR(dst)) return PTR_ERR(dst); err = skb_tunnel_check_pmtu(skb, dst, GENEVE_IPV6_HLEN + info->options_len, netif_is_any_bridge_port(dev)); if (err < 0) { dst_release(dst); return err; } else if (err) { struct ip_tunnel_info *info = skb_tunnel_info(skb); if (info) { struct ip_tunnel_info *unclone; unclone = skb_tunnel_info_unclone(skb); if (unlikely(!unclone)) { dst_release(dst); return -ENOMEM; } unclone->key.u.ipv6.dst = fl6.saddr; unclone->key.u.ipv6.src = fl6.daddr; } if (!pskb_may_pull(skb, ETH_HLEN)) { dst_release(dst); return -EINVAL; } skb->protocol = eth_type_trans(skb, geneve->dev); __netif_rx(skb); dst_release(dst); return -EMSGSIZE; } if (geneve->cfg.collect_md) { prio = ip_tunnel_ecn_encap(key->tos, ip_hdr(skb), skb); ttl = key->ttl; } else { prio = ip_tunnel_ecn_encap(ip6_tclass(fl6.flowlabel), ip_hdr(skb), skb); if (geneve->cfg.ttl_inherit) ttl = ip_tunnel_get_ttl(ip_hdr(skb), skb); else ttl = key->ttl; ttl = ttl ? 
: ip6_dst_hoplimit(dst); } err = geneve_build_skb(dst, skb, info, xnet, sizeof(struct ipv6hdr), geneve->cfg.inner_proto_inherit); if (unlikely(err)) return err; udp_tunnel6_xmit_skb(dst, gs6->sock->sk, skb, dev, &fl6.saddr, &fl6.daddr, prio, ttl, info->key.label, sport, geneve->cfg.info.key.tp_dst, !(info->key.tun_flags & TUNNEL_CSUM)); return 0; } #endif static netdev_tx_t geneve_xmit(struct sk_buff *skb, struct net_device *dev) { struct geneve_dev *geneve = netdev_priv(dev); struct ip_tunnel_info *info = NULL; int err; if (geneve->cfg.collect_md) { info = skb_tunnel_info(skb); if (unlikely(!info || !(info->mode & IP_TUNNEL_INFO_TX))) { netdev_dbg(dev, "no tunnel metadata\n"); dev_kfree_skb(skb); dev->stats.tx_dropped++; return NETDEV_TX_OK; } } else { info = &geneve->cfg.info; } rcu_read_lock(); #if IS_ENABLED(CONFIG_IPV6) if (info->mode & IP_TUNNEL_INFO_IPV6) err = geneve6_xmit_skb(skb, dev, geneve, info); else #endif err = geneve_xmit_skb(skb, dev, geneve, info); rcu_read_unlock(); if (likely(!err)) return NETDEV_TX_OK; if (err != -EMSGSIZE) dev_kfree_skb(skb); if (err == -ELOOP) dev->stats.collisions++; else if (err == -ENETUNREACH) dev->stats.tx_carrier_errors++; dev->stats.tx_errors++; return NETDEV_TX_OK; } static int geneve_change_mtu(struct net_device *dev, int new_mtu) { if (new_mtu > dev->max_mtu) new_mtu = dev->max_mtu; else if (new_mtu < dev->min_mtu) new_mtu = dev->min_mtu; dev->mtu = new_mtu; return 0; } static int geneve_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb) { struct ip_tunnel_info *info = skb_tunnel_info(skb); struct geneve_dev *geneve = netdev_priv(dev); __be16 sport; if (ip_tunnel_info_af(info) == AF_INET) { struct rtable *rt; struct flowi4 fl4; struct geneve_sock *gs4 = rcu_dereference(geneve->sock4); sport = udp_flow_src_port(geneve->net, skb, 1, USHRT_MAX, true); rt = geneve_get_v4_rt(skb, dev, gs4, &fl4, info, geneve->cfg.info.key.tp_dst, sport, NULL); if (IS_ERR(rt)) return PTR_ERR(rt); ip_rt_put(rt); info->key.u.ipv4.src = fl4.saddr; #if IS_ENABLED(CONFIG_IPV6) } else if (ip_tunnel_info_af(info) == AF_INET6) { struct dst_entry *dst; struct flowi6 fl6; struct geneve_sock *gs6 = rcu_dereference(geneve->sock6); sport = udp_flow_src_port(geneve->net, skb, 1, USHRT_MAX, true); dst = geneve_get_v6_dst(skb, dev, gs6, &fl6, info, geneve->cfg.info.key.tp_dst, sport); if (IS_ERR(dst)) return PTR_ERR(dst); dst_release(dst); info->key.u.ipv6.src = fl6.saddr; #endif } else { return -EINVAL; } info->key.tp_src = sport; info->key.tp_dst = geneve->cfg.info.key.tp_dst; return 0; } static const struct net_device_ops geneve_netdev_ops = { .ndo_init = geneve_init, .ndo_uninit = geneve_uninit, .ndo_open = geneve_open, .ndo_stop = geneve_stop, .ndo_start_xmit = geneve_xmit, .ndo_get_stats64 = dev_get_tstats64, .ndo_change_mtu = geneve_change_mtu, .ndo_validate_addr = eth_validate_addr, .ndo_set_mac_address = eth_mac_addr, .ndo_fill_metadata_dst = geneve_fill_metadata_dst, }; static void geneve_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *drvinfo) { strscpy(drvinfo->version, GENEVE_NETDEV_VER, sizeof(drvinfo->version)); strscpy(drvinfo->driver, "geneve", sizeof(drvinfo->driver)); } static const struct ethtool_ops geneve_ethtool_ops = { .get_drvinfo = geneve_get_drvinfo, .get_link = ethtool_op_get_link, }; /* Info for udev, that this is a virtual tunnel endpoint */ static struct device_type geneve_type = { .name = "geneve", }; /* Calls the ndo_udp_tunnel_add of the caller in order to * supply the listening GENEVE udp ports. 
Callers are expected * to implement the ndo_udp_tunnel_add. */ static void geneve_offload_rx_ports(struct net_device *dev, bool push) { struct net *net = dev_net(dev); struct geneve_net *gn = net_generic(net, geneve_net_id); struct geneve_sock *gs; rcu_read_lock(); list_for_each_entry_rcu(gs, &gn->sock_list, list) { if (push) { udp_tunnel_push_rx_port(dev, gs->sock, UDP_TUNNEL_TYPE_GENEVE); } else { udp_tunnel_drop_rx_port(dev, gs->sock, UDP_TUNNEL_TYPE_GENEVE); } } rcu_read_unlock(); } /* Initialize the device structure. */ static void geneve_setup(struct net_device *dev) { ether_setup(dev); dev->netdev_ops = &geneve_netdev_ops; dev->ethtool_ops = &geneve_ethtool_ops; dev->needs_free_netdev = true; SET_NETDEV_DEVTYPE(dev, &geneve_type); dev->features |= NETIF_F_LLTX; dev->features |= NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_FRAGLIST; dev->features |= NETIF_F_RXCSUM; dev->features |= NETIF_F_GSO_SOFTWARE; dev->hw_features |= NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_FRAGLIST; dev->hw_features |= NETIF_F_RXCSUM; dev->hw_features |= NETIF_F_GSO_SOFTWARE; /* MTU range: 68 - (something less than 65535) */ dev->min_mtu = ETH_MIN_MTU; /* The max_mtu calculation does not take account of GENEVE * options, to avoid excluding potentially valid * configurations. This will be further reduced by IPvX hdr size. */ dev->max_mtu = IP_MAX_MTU - GENEVE_BASE_HLEN - dev->hard_header_len; netif_keep_dst(dev); dev->priv_flags &= ~IFF_TX_SKB_SHARING; dev->priv_flags |= IFF_LIVE_ADDR_CHANGE | IFF_NO_QUEUE; eth_hw_addr_random(dev); } static const struct nla_policy geneve_policy[IFLA_GENEVE_MAX + 1] = { [IFLA_GENEVE_UNSPEC] = { .strict_start_type = IFLA_GENEVE_INNER_PROTO_INHERIT }, [IFLA_GENEVE_ID] = { .type = NLA_U32 }, [IFLA_GENEVE_REMOTE] = { .len = sizeof_field(struct iphdr, daddr) }, [IFLA_GENEVE_REMOTE6] = { .len = sizeof(struct in6_addr) }, [IFLA_GENEVE_TTL] = { .type = NLA_U8 }, [IFLA_GENEVE_TOS] = { .type = NLA_U8 }, [IFLA_GENEVE_LABEL] = { .type = NLA_U32 }, [IFLA_GENEVE_PORT] = { .type = NLA_U16 }, [IFLA_GENEVE_COLLECT_METADATA] = { .type = NLA_FLAG }, [IFLA_GENEVE_UDP_CSUM] = { .type = NLA_U8 }, [IFLA_GENEVE_UDP_ZERO_CSUM6_TX] = { .type = NLA_U8 }, [IFLA_GENEVE_UDP_ZERO_CSUM6_RX] = { .type = NLA_U8 }, [IFLA_GENEVE_TTL_INHERIT] = { .type = NLA_U8 }, [IFLA_GENEVE_DF] = { .type = NLA_U8 }, [IFLA_GENEVE_INNER_PROTO_INHERIT] = { .type = NLA_FLAG }, }; static int geneve_validate(struct nlattr *tb[], struct nlattr *data[], struct netlink_ext_ack *extack) { if (tb[IFLA_ADDRESS]) { if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN) { NL_SET_ERR_MSG_ATTR(extack, tb[IFLA_ADDRESS], "Provided link layer address is not Ethernet"); return -EINVAL; } if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS]))) { NL_SET_ERR_MSG_ATTR(extack, tb[IFLA_ADDRESS], "Provided Ethernet address is not unicast"); return -EADDRNOTAVAIL; } } if (!data) { NL_SET_ERR_MSG(extack, "Not enough attributes provided to perform the operation"); return -EINVAL; } if (data[IFLA_GENEVE_ID]) { __u32 vni = nla_get_u32(data[IFLA_GENEVE_ID]); if (vni >= GENEVE_N_VID) { NL_SET_ERR_MSG_ATTR(extack, data[IFLA_GENEVE_ID], "Geneve ID must be lower than 16777216"); return -ERANGE; } } if (data[IFLA_GENEVE_DF]) { enum ifla_geneve_df df = nla_get_u8(data[IFLA_GENEVE_DF]); if (df < 0 || df > GENEVE_DF_MAX) { NL_SET_ERR_MSG_ATTR(extack, data[IFLA_GENEVE_DF], "Invalid DF attribute"); return -EINVAL; } } return 0; } static struct geneve_dev *geneve_find_dev(struct geneve_net *gn, const struct ip_tunnel_info *info, bool *tun_on_same_port, bool *tun_collect_md) { struct geneve_dev 
*geneve, *t = NULL; *tun_on_same_port = false; *tun_collect_md = false; list_for_each_entry(geneve, &gn->geneve_list, next) { if (info->key.tp_dst == geneve->cfg.info.key.tp_dst) { *tun_collect_md = geneve->cfg.collect_md; *tun_on_same_port = true; } if (info->key.tun_id == geneve->cfg.info.key.tun_id && info->key.tp_dst == geneve->cfg.info.key.tp_dst && !memcmp(&info->key.u, &geneve->cfg.info.key.u, sizeof(info->key.u))) t = geneve; } return t; } static bool is_tnl_info_zero(const struct ip_tunnel_info *info) { return !(info->key.tun_id || info->key.tun_flags || info->key.tos || info->key.ttl || info->key.label || info->key.tp_src || memchr_inv(&info->key.u, 0, sizeof(info->key.u))); } static bool geneve_dst_addr_equal(struct ip_tunnel_info *a, struct ip_tunnel_info *b) { if (ip_tunnel_info_af(a) == AF_INET) return a->key.u.ipv4.dst == b->key.u.ipv4.dst; else return ipv6_addr_equal(&a->key.u.ipv6.dst, &b->key.u.ipv6.dst); } static int geneve_configure(struct net *net, struct net_device *dev, struct netlink_ext_ack *extack, const struct geneve_config *cfg) { struct geneve_net *gn = net_generic(net, geneve_net_id); struct geneve_dev *t, *geneve = netdev_priv(dev); const struct ip_tunnel_info *info = &cfg->info; bool tun_collect_md, tun_on_same_port; int err, encap_len; if (cfg->collect_md && !is_tnl_info_zero(info)) { NL_SET_ERR_MSG(extack, "Device is externally controlled, so attributes (VNI, Port, and so on) must not be specified"); return -EINVAL; } geneve->net = net; geneve->dev = dev; t = geneve_find_dev(gn, info, &tun_on_same_port, &tun_collect_md); if (t) return -EBUSY; /* make enough headroom for basic scenario */ encap_len = GENEVE_BASE_HLEN + ETH_HLEN; if (!cfg->collect_md && ip_tunnel_info_af(info) == AF_INET) { encap_len += sizeof(struct iphdr); dev->max_mtu -= sizeof(struct iphdr); } else { encap_len += sizeof(struct ipv6hdr); dev->max_mtu -= sizeof(struct ipv6hdr); } dev->needed_headroom = encap_len + ETH_HLEN; if (cfg->collect_md) { if (tun_on_same_port) { NL_SET_ERR_MSG(extack, "There can be only one externally controlled device on a destination port"); return -EPERM; } } else { if (tun_collect_md) { NL_SET_ERR_MSG(extack, "There already exists an externally controlled device on this destination port"); return -EPERM; } } dst_cache_reset(&geneve->cfg.info.dst_cache); memcpy(&geneve->cfg, cfg, sizeof(*cfg)); if (geneve->cfg.inner_proto_inherit) { dev->header_ops = NULL; dev->type = ARPHRD_NONE; dev->hard_header_len = 0; dev->addr_len = 0; dev->flags = IFF_POINTOPOINT | IFF_NOARP; } err = register_netdevice(dev); if (err) return err; list_add(&geneve->next, &gn->geneve_list); return 0; } static void init_tnl_info(struct ip_tunnel_info *info, __u16 dst_port) { memset(info, 0, sizeof(*info)); info->key.tp_dst = htons(dst_port); } static int geneve_nl2info(struct nlattr *tb[], struct nlattr *data[], struct netlink_ext_ack *extack, struct geneve_config *cfg, bool changelink) { struct ip_tunnel_info *info = &cfg->info; int attrtype; if (data[IFLA_GENEVE_REMOTE] && data[IFLA_GENEVE_REMOTE6]) { NL_SET_ERR_MSG(extack, "Cannot specify both IPv4 and IPv6 Remote addresses"); return -EINVAL; } if (data[IFLA_GENEVE_REMOTE]) { if (changelink && (ip_tunnel_info_af(info) == AF_INET6)) { attrtype = IFLA_GENEVE_REMOTE; goto change_notsup; } info->key.u.ipv4.dst = nla_get_in_addr(data[IFLA_GENEVE_REMOTE]); if (ipv4_is_multicast(info->key.u.ipv4.dst)) { NL_SET_ERR_MSG_ATTR(extack, data[IFLA_GENEVE_REMOTE], "Remote IPv4 address cannot be Multicast"); return -EINVAL; } } if 
(data[IFLA_GENEVE_REMOTE6]) { #if IS_ENABLED(CONFIG_IPV6) if (changelink && (ip_tunnel_info_af(info) == AF_INET)) { attrtype = IFLA_GENEVE_REMOTE6; goto change_notsup; } info->mode = IP_TUNNEL_INFO_IPV6; info->key.u.ipv6.dst = nla_get_in6_addr(data[IFLA_GENEVE_REMOTE6]); if (ipv6_addr_type(&info->key.u.ipv6.dst) & IPV6_ADDR_LINKLOCAL) { NL_SET_ERR_MSG_ATTR(extack, data[IFLA_GENEVE_REMOTE6], "Remote IPv6 address cannot be link-local"); return -EINVAL; } if (ipv6_addr_is_multicast(&info->key.u.ipv6.dst)) { NL_SET_ERR_MSG_ATTR(extack, data[IFLA_GENEVE_REMOTE6], "Remote IPv6 address cannot be Multicast"); return -EINVAL; } info->key.tun_flags |= TUNNEL_CSUM; cfg->use_udp6_rx_checksums = true; #else NL_SET_ERR_MSG_ATTR(extack, data[IFLA_GENEVE_REMOTE6], "IPv6 support not enabled in the kernel"); return -EPFNOSUPPORT; #endif } if (data[IFLA_GENEVE_ID]) { __u32 vni; __u8 tvni[3]; __be64 tunid; vni = nla_get_u32(data[IFLA_GENEVE_ID]); tvni[0] = (vni & 0x00ff0000) >> 16; tvni[1] = (vni & 0x0000ff00) >> 8; tvni[2] = vni & 0x000000ff; tunid = vni_to_tunnel_id(tvni); if (changelink && (tunid != info->key.tun_id)) { attrtype = IFLA_GENEVE_ID; goto change_notsup; } info->key.tun_id = tunid; } if (data[IFLA_GENEVE_TTL_INHERIT]) { if (nla_get_u8(data[IFLA_GENEVE_TTL_INHERIT])) cfg->ttl_inherit = true; else cfg->ttl_inherit = false; } else if (data[IFLA_GENEVE_TTL]) { info->key.ttl = nla_get_u8(data[IFLA_GENEVE_TTL]); cfg->ttl_inherit = false; } if (data[IFLA_GENEVE_TOS]) info->key.tos = nla_get_u8(data[IFLA_GENEVE_TOS]); if (data[IFLA_GENEVE_DF]) cfg->df = nla_get_u8(data[IFLA_GENEVE_DF]); if (data[IFLA_GENEVE_LABEL]) { info->key.label = nla_get_be32(data[IFLA_GENEVE_LABEL]) & IPV6_FLOWLABEL_MASK; if (info->key.label && (!(info->mode & IP_TUNNEL_INFO_IPV6))) { NL_SET_ERR_MSG_ATTR(extack, data[IFLA_GENEVE_LABEL], "Label attribute only applies for IPv6 Geneve devices"); return -EINVAL; } } if (data[IFLA_GENEVE_PORT]) { if (changelink) { attrtype = IFLA_GENEVE_PORT; goto change_notsup; } info->key.tp_dst = nla_get_be16(data[IFLA_GENEVE_PORT]); } if (data[IFLA_GENEVE_COLLECT_METADATA]) { if (changelink) { attrtype = IFLA_GENEVE_COLLECT_METADATA; goto change_notsup; } cfg->collect_md = true; } if (data[IFLA_GENEVE_UDP_CSUM]) { if (changelink) { attrtype = IFLA_GENEVE_UDP_CSUM; goto change_notsup; } if (nla_get_u8(data[IFLA_GENEVE_UDP_CSUM])) info->key.tun_flags |= TUNNEL_CSUM; } if (data[IFLA_GENEVE_UDP_ZERO_CSUM6_TX]) { #if IS_ENABLED(CONFIG_IPV6) if (changelink) { attrtype = IFLA_GENEVE_UDP_ZERO_CSUM6_TX; goto change_notsup; } if (nla_get_u8(data[IFLA_GENEVE_UDP_ZERO_CSUM6_TX])) info->key.tun_flags &= ~TUNNEL_CSUM; #else NL_SET_ERR_MSG_ATTR(extack, data[IFLA_GENEVE_UDP_ZERO_CSUM6_TX], "IPv6 support not enabled in the kernel"); return -EPFNOSUPPORT; #endif } if (data[IFLA_GENEVE_UDP_ZERO_CSUM6_RX]) { #if IS_ENABLED(CONFIG_IPV6) if (changelink) { attrtype = IFLA_GENEVE_UDP_ZERO_CSUM6_RX; goto change_notsup; } if (nla_get_u8(data[IFLA_GENEVE_UDP_ZERO_CSUM6_RX])) cfg->use_udp6_rx_checksums = false; #else NL_SET_ERR_MSG_ATTR(extack, data[IFLA_GENEVE_UDP_ZERO_CSUM6_RX], "IPv6 support not enabled in the kernel"); return -EPFNOSUPPORT; #endif } if (data[IFLA_GENEVE_INNER_PROTO_INHERIT]) { if (changelink) { attrtype = IFLA_GENEVE_INNER_PROTO_INHERIT; goto change_notsup; } cfg->inner_proto_inherit = true; } return 0; change_notsup: NL_SET_ERR_MSG_ATTR(extack, data[attrtype], "Changing VNI, Port, endpoint IP address family, external, inner_proto_inherit, and UDP checksum attributes are not supported"); return 
-EOPNOTSUPP; } static void geneve_link_config(struct net_device *dev, struct ip_tunnel_info *info, struct nlattr *tb[]) { struct geneve_dev *geneve = netdev_priv(dev); int ldev_mtu = 0; if (tb[IFLA_MTU]) { geneve_change_mtu(dev, nla_get_u32(tb[IFLA_MTU])); return; } switch (ip_tunnel_info_af(info)) { case AF_INET: { struct flowi4 fl4 = { .daddr = info->key.u.ipv4.dst }; struct rtable *rt = ip_route_output_key(geneve->net, &fl4); if (!IS_ERR(rt) && rt->dst.dev) { ldev_mtu = rt->dst.dev->mtu - GENEVE_IPV4_HLEN; ip_rt_put(rt); } break; } #if IS_ENABLED(CONFIG_IPV6) case AF_INET6: { struct rt6_info *rt; if (!__in6_dev_get(dev)) break; rt = rt6_lookup(geneve->net, &info->key.u.ipv6.dst, NULL, 0, NULL, 0); if (rt && rt->dst.dev) ldev_mtu = rt->dst.dev->mtu - GENEVE_IPV6_HLEN; ip6_rt_put(rt); break; } #endif } if (ldev_mtu <= 0) return; geneve_change_mtu(dev, ldev_mtu - info->options_len); } static int geneve_newlink(struct net *net, struct net_device *dev, struct nlattr *tb[], struct nlattr *data[], struct netlink_ext_ack *extack) { struct geneve_config cfg = { .df = GENEVE_DF_UNSET, .use_udp6_rx_checksums = false, .ttl_inherit = false, .collect_md = false, }; int err; init_tnl_info(&cfg.info, GENEVE_UDP_PORT); err = geneve_nl2info(tb, data, extack, &cfg, false); if (err) return err; err = geneve_configure(net, dev, extack, &cfg); if (err) return err; geneve_link_config(dev, &cfg.info, tb); return 0; } /* Quiesces the geneve device data path for both TX and RX. * * On transmit geneve checks for non-NULL geneve_sock before it proceeds. * So, if we set that socket to NULL under RCU and wait for synchronize_net() * to complete for the existing set of in-flight packets to be transmitted, * then we would have quiesced the transmit data path. All the future packets * will get dropped until we unquiesce the data path. * * On receive geneve dereference the geneve_sock stashed in the socket. So, * if we set that to NULL under RCU and wait for synchronize_net() to * complete, then we would have quiesced the receive data path. */ static void geneve_quiesce(struct geneve_dev *geneve, struct geneve_sock **gs4, struct geneve_sock **gs6) { *gs4 = rtnl_dereference(geneve->sock4); rcu_assign_pointer(geneve->sock4, NULL); if (*gs4) rcu_assign_sk_user_data((*gs4)->sock->sk, NULL); #if IS_ENABLED(CONFIG_IPV6) *gs6 = rtnl_dereference(geneve->sock6); rcu_assign_pointer(geneve->sock6, NULL); if (*gs6) rcu_assign_sk_user_data((*gs6)->sock->sk, NULL); #else *gs6 = NULL; #endif synchronize_net(); } /* Resumes the geneve device data path for both TX and RX. */ static void geneve_unquiesce(struct geneve_dev *geneve, struct geneve_sock *gs4, struct geneve_sock __maybe_unused *gs6) { rcu_assign_pointer(geneve->sock4, gs4); if (gs4) rcu_assign_sk_user_data(gs4->sock->sk, gs4); #if IS_ENABLED(CONFIG_IPV6) rcu_assign_pointer(geneve->sock6, gs6); if (gs6) rcu_assign_sk_user_data(gs6->sock->sk, gs6); #endif synchronize_net(); } static int geneve_changelink(struct net_device *dev, struct nlattr *tb[], struct nlattr *data[], struct netlink_ext_ack *extack) { struct geneve_dev *geneve = netdev_priv(dev); struct geneve_sock *gs4, *gs6; struct geneve_config cfg; int err; /* If the geneve device is configured for metadata (or externally * controlled, for example, OVS), then nothing can be changed. */ if (geneve->cfg.collect_md) return -EOPNOTSUPP; /* Start with the existing info. 
*/ memcpy(&cfg, &geneve->cfg, sizeof(cfg)); err = geneve_nl2info(tb, data, extack, &cfg, true); if (err) return err; if (!geneve_dst_addr_equal(&geneve->cfg.info, &cfg.info)) { dst_cache_reset(&cfg.info.dst_cache); geneve_link_config(dev, &cfg.info, tb); } geneve_quiesce(geneve, &gs4, &gs6); memcpy(&geneve->cfg, &cfg, sizeof(cfg)); geneve_unquiesce(geneve, gs4, gs6); return 0; } static void geneve_dellink(struct net_device *dev, struct list_head *head) { struct geneve_dev *geneve = netdev_priv(dev); list_del(&geneve->next); unregister_netdevice_queue(dev, head); } static size_t geneve_get_size(const struct net_device *dev) { return nla_total_size(sizeof(__u32)) + /* IFLA_GENEVE_ID */ nla_total_size(sizeof(struct in6_addr)) + /* IFLA_GENEVE_REMOTE{6} */ nla_total_size(sizeof(__u8)) + /* IFLA_GENEVE_TTL */ nla_total_size(sizeof(__u8)) + /* IFLA_GENEVE_TOS */ nla_total_size(sizeof(__u8)) + /* IFLA_GENEVE_DF */ nla_total_size(sizeof(__be32)) + /* IFLA_GENEVE_LABEL */ nla_total_size(sizeof(__be16)) + /* IFLA_GENEVE_PORT */ nla_total_size(0) + /* IFLA_GENEVE_COLLECT_METADATA */ nla_total_size(sizeof(__u8)) + /* IFLA_GENEVE_UDP_CSUM */ nla_total_size(sizeof(__u8)) + /* IFLA_GENEVE_UDP_ZERO_CSUM6_TX */ nla_total_size(sizeof(__u8)) + /* IFLA_GENEVE_UDP_ZERO_CSUM6_RX */ nla_total_size(sizeof(__u8)) + /* IFLA_GENEVE_TTL_INHERIT */ nla_total_size(0) + /* IFLA_GENEVE_INNER_PROTO_INHERIT */ 0; } static int geneve_fill_info(struct sk_buff *skb, const struct net_device *dev) { struct geneve_dev *geneve = netdev_priv(dev); struct ip_tunnel_info *info = &geneve->cfg.info; bool ttl_inherit = geneve->cfg.ttl_inherit; bool metadata = geneve->cfg.collect_md; __u8 tmp_vni[3]; __u32 vni; tunnel_id_to_vni(info->key.tun_id, tmp_vni); vni = (tmp_vni[0] << 16) | (tmp_vni[1] << 8) | tmp_vni[2]; if (nla_put_u32(skb, IFLA_GENEVE_ID, vni)) goto nla_put_failure; if (!metadata && ip_tunnel_info_af(info) == AF_INET) { if (nla_put_in_addr(skb, IFLA_GENEVE_REMOTE, info->key.u.ipv4.dst)) goto nla_put_failure; if (nla_put_u8(skb, IFLA_GENEVE_UDP_CSUM, !!(info->key.tun_flags & TUNNEL_CSUM))) goto nla_put_failure; #if IS_ENABLED(CONFIG_IPV6) } else if (!metadata) { if (nla_put_in6_addr(skb, IFLA_GENEVE_REMOTE6, &info->key.u.ipv6.dst)) goto nla_put_failure; if (nla_put_u8(skb, IFLA_GENEVE_UDP_ZERO_CSUM6_TX, !(info->key.tun_flags & TUNNEL_CSUM))) goto nla_put_failure; #endif } if (nla_put_u8(skb, IFLA_GENEVE_TTL, info->key.ttl) || nla_put_u8(skb, IFLA_GENEVE_TOS, info->key.tos) || nla_put_be32(skb, IFLA_GENEVE_LABEL, info->key.label)) goto nla_put_failure; if (nla_put_u8(skb, IFLA_GENEVE_DF, geneve->cfg.df)) goto nla_put_failure; if (nla_put_be16(skb, IFLA_GENEVE_PORT, info->key.tp_dst)) goto nla_put_failure; if (metadata && nla_put_flag(skb, IFLA_GENEVE_COLLECT_METADATA)) goto nla_put_failure; #if IS_ENABLED(CONFIG_IPV6) if (nla_put_u8(skb, IFLA_GENEVE_UDP_ZERO_CSUM6_RX, !geneve->cfg.use_udp6_rx_checksums)) goto nla_put_failure; #endif if (nla_put_u8(skb, IFLA_GENEVE_TTL_INHERIT, ttl_inherit)) goto nla_put_failure; if (geneve->cfg.inner_proto_inherit && nla_put_flag(skb, IFLA_GENEVE_INNER_PROTO_INHERIT)) goto nla_put_failure; return 0; nla_put_failure: return -EMSGSIZE; } static struct rtnl_link_ops geneve_link_ops __read_mostly = { .kind = "geneve", .maxtype = IFLA_GENEVE_MAX, .policy = geneve_policy, .priv_size = sizeof(struct geneve_dev), .setup = geneve_setup, .validate = geneve_validate, .newlink = geneve_newlink, .changelink = geneve_changelink, .dellink = geneve_dellink, .get_size = geneve_get_size, .fill_info = 
geneve_fill_info, }; struct net_device *geneve_dev_create_fb(struct net *net, const char *name, u8 name_assign_type, u16 dst_port) { struct nlattr *tb[IFLA_MAX + 1]; struct net_device *dev; LIST_HEAD(list_kill); int err; struct geneve_config cfg = { .df = GENEVE_DF_UNSET, .use_udp6_rx_checksums = true, .ttl_inherit = false, .collect_md = true, }; memset(tb, 0, sizeof(tb)); dev = rtnl_create_link(net, name, name_assign_type, &geneve_link_ops, tb, NULL); if (IS_ERR(dev)) return dev; init_tnl_info(&cfg.info, dst_port); err = geneve_configure(net, dev, NULL, &cfg); if (err) { free_netdev(dev); return ERR_PTR(err); } /* openvswitch users expect packet sizes to be unrestricted, * so set the largest MTU we can. */ err = geneve_change_mtu(dev, IP_MAX_MTU); if (err) goto err; err = rtnl_configure_link(dev, NULL, 0, NULL); if (err < 0) goto err; return dev; err: geneve_dellink(dev, &list_kill); unregister_netdevice_many(&list_kill); return ERR_PTR(err); } EXPORT_SYMBOL_GPL(geneve_dev_create_fb); static int geneve_netdevice_event(struct notifier_block *unused, unsigned long event, void *ptr) { struct net_device *dev = netdev_notifier_info_to_dev(ptr); if (event == NETDEV_UDP_TUNNEL_PUSH_INFO) geneve_offload_rx_ports(dev, true); else if (event == NETDEV_UDP_TUNNEL_DROP_INFO) geneve_offload_rx_ports(dev, false); return NOTIFY_DONE; } static struct notifier_block geneve_notifier_block __read_mostly = { .notifier_call = geneve_netdevice_event, }; static __net_init int geneve_init_net(struct net *net) { struct geneve_net *gn = net_generic(net, geneve_net_id); INIT_LIST_HEAD(&gn->geneve_list); INIT_LIST_HEAD(&gn->sock_list); return 0; } static void geneve_destroy_tunnels(struct net *net, struct list_head *head) { struct geneve_net *gn = net_generic(net, geneve_net_id); struct geneve_dev *geneve, *next; struct net_device *dev, *aux; /* gather any geneve devices that were moved into this ns */ for_each_netdev_safe(net, dev, aux) if (dev->rtnl_link_ops == &geneve_link_ops) unregister_netdevice_queue(dev, head); /* now gather any other geneve devices that were created in this ns */ list_for_each_entry_safe(geneve, next, &gn->geneve_list, next) { /* If geneve->dev is in the same netns, it was already added * to the list by the previous loop. 
*/ if (!net_eq(dev_net(geneve->dev), net)) unregister_netdevice_queue(geneve->dev, head); } } static void __net_exit geneve_exit_batch_net(struct list_head *net_list) { struct net *net; LIST_HEAD(list); rtnl_lock(); list_for_each_entry(net, net_list, exit_list) geneve_destroy_tunnels(net, &list); /* unregister the devices gathered above */ unregister_netdevice_many(&list); rtnl_unlock(); list_for_each_entry(net, net_list, exit_list) { const struct geneve_net *gn = net_generic(net, geneve_net_id); WARN_ON_ONCE(!list_empty(&gn->sock_list)); } } static struct pernet_operations geneve_net_ops = { .init = geneve_init_net, .exit_batch = geneve_exit_batch_net, .id = &geneve_net_id, .size = sizeof(struct geneve_net), }; static int __init geneve_init_module(void) { int rc; rc = register_pernet_subsys(&geneve_net_ops); if (rc) goto out1; rc = register_netdevice_notifier(&geneve_notifier_block); if (rc) goto out2; rc = rtnl_link_register(&geneve_link_ops); if (rc) goto out3; return 0; out3: unregister_netdevice_notifier(&geneve_notifier_block); out2: unregister_pernet_subsys(&geneve_net_ops); out1: return rc; } late_initcall(geneve_init_module); static void __exit geneve_cleanup_module(void) { rtnl_link_unregister(&geneve_link_ops); unregister_netdevice_notifier(&geneve_notifier_block); unregister_pernet_subsys(&geneve_net_ops); } module_exit(geneve_cleanup_module); MODULE_LICENSE("GPL"); MODULE_VERSION(GENEVE_NETDEV_VER); MODULE_AUTHOR("John W. Linville <[email protected]>"); MODULE_DESCRIPTION("Interface driver for GENEVE encapsulated traffic"); MODULE_ALIAS_RTNL_LINK("geneve");
linux-master
drivers/net/geneve.c
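One detail of the file above worth illustrating: on little-endian hosts, vni_to_tunnel_id() places the 24-bit VNI in bytes 5..7 of the 64-bit tunnel id, which is why eq_tun_id_and_vni() can compare the VNI directly against &tun_id[5]. The standalone sketch below (not part of the driver, assumes a little-endian host) demonstrates that byte layout.

/* Userspace sketch of the driver's little-endian VNI packing. */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Same packing as the #else (little-endian) branch of vni_to_tunnel_id(). */
static uint64_t vni_to_tunnel_id(const uint8_t *vni)
{
	return ((uint64_t)vni[0] << 40) |
	       ((uint64_t)vni[1] << 48) |
	       ((uint64_t)vni[2] << 56);
}

int main(void)
{
	uint8_t vni[3] = { 0x12, 0x34, 0x56 };	/* VNI 0x123456 */
	uint64_t tun_id = vni_to_tunnel_id(vni);
	uint8_t raw[8];

	memcpy(raw, &tun_id, sizeof(raw));

	/* The comparison below is exactly what eq_tun_id_and_vni() does. */
	printf("tunnel id bytes 5..7 == VNI: %s\n",
	       memcmp(vni, &raw[5], 3) == 0 ? "yes" : "no");
	return 0;
}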
// SPDX-License-Identifier: GPL-2.0-or-later /* MHI Network driver - Network over MHI bus * * Copyright (C) 2020 Linaro Ltd <[email protected]> */ #include <linux/if_arp.h> #include <linux/mhi.h> #include <linux/mod_devicetable.h> #include <linux/module.h> #include <linux/netdevice.h> #include <linux/skbuff.h> #include <linux/u64_stats_sync.h> #define MHI_NET_MIN_MTU ETH_MIN_MTU #define MHI_NET_MAX_MTU 0xffff #define MHI_NET_DEFAULT_MTU 0x4000 struct mhi_net_stats { u64_stats_t rx_packets; u64_stats_t rx_bytes; u64_stats_t rx_errors; u64_stats_t tx_packets; u64_stats_t tx_bytes; u64_stats_t tx_errors; u64_stats_t tx_dropped; struct u64_stats_sync tx_syncp; struct u64_stats_sync rx_syncp; }; struct mhi_net_dev { struct mhi_device *mdev; struct net_device *ndev; struct sk_buff *skbagg_head; struct sk_buff *skbagg_tail; struct delayed_work rx_refill; struct mhi_net_stats stats; u32 rx_queue_sz; int msg_enable; unsigned int mru; }; struct mhi_device_info { const char *netname; }; static int mhi_ndo_open(struct net_device *ndev) { struct mhi_net_dev *mhi_netdev = netdev_priv(ndev); /* Feed the rx buffer pool */ schedule_delayed_work(&mhi_netdev->rx_refill, 0); /* Carrier is established via out-of-band channel (e.g. qmi) */ netif_carrier_on(ndev); netif_start_queue(ndev); return 0; } static int mhi_ndo_stop(struct net_device *ndev) { struct mhi_net_dev *mhi_netdev = netdev_priv(ndev); netif_stop_queue(ndev); netif_carrier_off(ndev); cancel_delayed_work_sync(&mhi_netdev->rx_refill); return 0; } static netdev_tx_t mhi_ndo_xmit(struct sk_buff *skb, struct net_device *ndev) { struct mhi_net_dev *mhi_netdev = netdev_priv(ndev); struct mhi_device *mdev = mhi_netdev->mdev; int err; err = mhi_queue_skb(mdev, DMA_TO_DEVICE, skb, skb->len, MHI_EOT); if (unlikely(err)) { net_err_ratelimited("%s: Failed to queue TX buf (%d)\n", ndev->name, err); dev_kfree_skb_any(skb); goto exit_drop; } if (mhi_queue_is_full(mdev, DMA_TO_DEVICE)) netif_stop_queue(ndev); return NETDEV_TX_OK; exit_drop: u64_stats_update_begin(&mhi_netdev->stats.tx_syncp); u64_stats_inc(&mhi_netdev->stats.tx_dropped); u64_stats_update_end(&mhi_netdev->stats.tx_syncp); return NETDEV_TX_OK; } static void mhi_ndo_get_stats64(struct net_device *ndev, struct rtnl_link_stats64 *stats) { struct mhi_net_dev *mhi_netdev = netdev_priv(ndev); unsigned int start; do { start = u64_stats_fetch_begin(&mhi_netdev->stats.rx_syncp); stats->rx_packets = u64_stats_read(&mhi_netdev->stats.rx_packets); stats->rx_bytes = u64_stats_read(&mhi_netdev->stats.rx_bytes); stats->rx_errors = u64_stats_read(&mhi_netdev->stats.rx_errors); } while (u64_stats_fetch_retry(&mhi_netdev->stats.rx_syncp, start)); do { start = u64_stats_fetch_begin(&mhi_netdev->stats.tx_syncp); stats->tx_packets = u64_stats_read(&mhi_netdev->stats.tx_packets); stats->tx_bytes = u64_stats_read(&mhi_netdev->stats.tx_bytes); stats->tx_errors = u64_stats_read(&mhi_netdev->stats.tx_errors); stats->tx_dropped = u64_stats_read(&mhi_netdev->stats.tx_dropped); } while (u64_stats_fetch_retry(&mhi_netdev->stats.tx_syncp, start)); } static const struct net_device_ops mhi_netdev_ops = { .ndo_open = mhi_ndo_open, .ndo_stop = mhi_ndo_stop, .ndo_start_xmit = mhi_ndo_xmit, .ndo_get_stats64 = mhi_ndo_get_stats64, }; static void mhi_net_setup(struct net_device *ndev) { ndev->header_ops = NULL; /* No header */ ndev->type = ARPHRD_RAWIP; ndev->hard_header_len = 0; ndev->addr_len = 0; ndev->flags = IFF_POINTOPOINT | IFF_NOARP; ndev->netdev_ops = &mhi_netdev_ops; ndev->mtu = MHI_NET_DEFAULT_MTU; ndev->min_mtu = 
MHI_NET_MIN_MTU; ndev->max_mtu = MHI_NET_MAX_MTU; ndev->tx_queue_len = 1000; } static struct sk_buff *mhi_net_skb_agg(struct mhi_net_dev *mhi_netdev, struct sk_buff *skb) { struct sk_buff *head = mhi_netdev->skbagg_head; struct sk_buff *tail = mhi_netdev->skbagg_tail; /* This is non-paged skb chaining using frag_list */ if (!head) { mhi_netdev->skbagg_head = skb; return skb; } if (!skb_shinfo(head)->frag_list) skb_shinfo(head)->frag_list = skb; else tail->next = skb; head->len += skb->len; head->data_len += skb->len; head->truesize += skb->truesize; mhi_netdev->skbagg_tail = skb; return mhi_netdev->skbagg_head; } static void mhi_net_dl_callback(struct mhi_device *mhi_dev, struct mhi_result *mhi_res) { struct mhi_net_dev *mhi_netdev = dev_get_drvdata(&mhi_dev->dev); struct sk_buff *skb = mhi_res->buf_addr; int free_desc_count; free_desc_count = mhi_get_free_desc_count(mhi_dev, DMA_FROM_DEVICE); if (unlikely(mhi_res->transaction_status)) { switch (mhi_res->transaction_status) { case -EOVERFLOW: /* Packet can not fit in one MHI buffer and has been * split over multiple MHI transfers, do re-aggregation. * That usually means the device side MTU is larger than * the host side MTU/MRU. Since this is not optimal, * print a warning (once). */ netdev_warn_once(mhi_netdev->ndev, "Fragmented packets received, fix MTU?\n"); skb_put(skb, mhi_res->bytes_xferd); mhi_net_skb_agg(mhi_netdev, skb); break; case -ENOTCONN: /* MHI layer stopping/resetting the DL channel */ dev_kfree_skb_any(skb); return; default: /* Unknown error, simply drop */ dev_kfree_skb_any(skb); u64_stats_update_begin(&mhi_netdev->stats.rx_syncp); u64_stats_inc(&mhi_netdev->stats.rx_errors); u64_stats_update_end(&mhi_netdev->stats.rx_syncp); } } else { skb_put(skb, mhi_res->bytes_xferd); if (mhi_netdev->skbagg_head) { /* Aggregate the final fragment */ skb = mhi_net_skb_agg(mhi_netdev, skb); mhi_netdev->skbagg_head = NULL; } switch (skb->data[0] & 0xf0) { case 0x40: skb->protocol = htons(ETH_P_IP); break; case 0x60: skb->protocol = htons(ETH_P_IPV6); break; default: skb->protocol = htons(ETH_P_MAP); break; } u64_stats_update_begin(&mhi_netdev->stats.rx_syncp); u64_stats_inc(&mhi_netdev->stats.rx_packets); u64_stats_add(&mhi_netdev->stats.rx_bytes, skb->len); u64_stats_update_end(&mhi_netdev->stats.rx_syncp); __netif_rx(skb); } /* Refill if RX buffers queue becomes low */ if (free_desc_count >= mhi_netdev->rx_queue_sz / 2) schedule_delayed_work(&mhi_netdev->rx_refill, 0); } static void mhi_net_ul_callback(struct mhi_device *mhi_dev, struct mhi_result *mhi_res) { struct mhi_net_dev *mhi_netdev = dev_get_drvdata(&mhi_dev->dev); struct net_device *ndev = mhi_netdev->ndev; struct mhi_device *mdev = mhi_netdev->mdev; struct sk_buff *skb = mhi_res->buf_addr; /* Hardware has consumed the buffer, so free the skb (which is not * freed by the MHI stack) and perform accounting. 
*/ dev_consume_skb_any(skb); u64_stats_update_begin(&mhi_netdev->stats.tx_syncp); if (unlikely(mhi_res->transaction_status)) { /* MHI layer stopping/resetting the UL channel */ if (mhi_res->transaction_status == -ENOTCONN) { u64_stats_update_end(&mhi_netdev->stats.tx_syncp); return; } u64_stats_inc(&mhi_netdev->stats.tx_errors); } else { u64_stats_inc(&mhi_netdev->stats.tx_packets); u64_stats_add(&mhi_netdev->stats.tx_bytes, mhi_res->bytes_xferd); } u64_stats_update_end(&mhi_netdev->stats.tx_syncp); if (netif_queue_stopped(ndev) && !mhi_queue_is_full(mdev, DMA_TO_DEVICE)) netif_wake_queue(ndev); } static void mhi_net_rx_refill_work(struct work_struct *work) { struct mhi_net_dev *mhi_netdev = container_of(work, struct mhi_net_dev, rx_refill.work); struct net_device *ndev = mhi_netdev->ndev; struct mhi_device *mdev = mhi_netdev->mdev; struct sk_buff *skb; unsigned int size; int err; size = mhi_netdev->mru ? mhi_netdev->mru : READ_ONCE(ndev->mtu); while (!mhi_queue_is_full(mdev, DMA_FROM_DEVICE)) { skb = netdev_alloc_skb(ndev, size); if (unlikely(!skb)) break; err = mhi_queue_skb(mdev, DMA_FROM_DEVICE, skb, size, MHI_EOT); if (unlikely(err)) { net_err_ratelimited("%s: Failed to queue RX buf (%d)\n", ndev->name, err); kfree_skb(skb); break; } /* Do not hog the CPU if rx buffers are consumed faster than * queued (unlikely). */ cond_resched(); } /* If we're still starved of rx buffers, reschedule later */ if (mhi_get_free_desc_count(mdev, DMA_FROM_DEVICE) == mhi_netdev->rx_queue_sz) schedule_delayed_work(&mhi_netdev->rx_refill, HZ / 2); } static int mhi_net_newlink(struct mhi_device *mhi_dev, struct net_device *ndev) { struct mhi_net_dev *mhi_netdev; int err; mhi_netdev = netdev_priv(ndev); dev_set_drvdata(&mhi_dev->dev, mhi_netdev); mhi_netdev->ndev = ndev; mhi_netdev->mdev = mhi_dev; mhi_netdev->skbagg_head = NULL; mhi_netdev->mru = mhi_dev->mhi_cntrl->mru; INIT_DELAYED_WORK(&mhi_netdev->rx_refill, mhi_net_rx_refill_work); u64_stats_init(&mhi_netdev->stats.rx_syncp); u64_stats_init(&mhi_netdev->stats.tx_syncp); /* Start MHI channels */ err = mhi_prepare_for_transfer(mhi_dev); if (err) return err; /* Number of transfer descriptors determines size of the queue */ mhi_netdev->rx_queue_sz = mhi_get_free_desc_count(mhi_dev, DMA_FROM_DEVICE); err = register_netdev(ndev); if (err) return err; return 0; } static void mhi_net_dellink(struct mhi_device *mhi_dev, struct net_device *ndev) { struct mhi_net_dev *mhi_netdev = netdev_priv(ndev); unregister_netdev(ndev); mhi_unprepare_from_transfer(mhi_dev); kfree_skb(mhi_netdev->skbagg_head); free_netdev(ndev); dev_set_drvdata(&mhi_dev->dev, NULL); } static int mhi_net_probe(struct mhi_device *mhi_dev, const struct mhi_device_id *id) { const struct mhi_device_info *info = (struct mhi_device_info *)id->driver_data; struct net_device *ndev; int err; ndev = alloc_netdev(sizeof(struct mhi_net_dev), info->netname, NET_NAME_PREDICTABLE, mhi_net_setup); if (!ndev) return -ENOMEM; SET_NETDEV_DEV(ndev, &mhi_dev->dev); err = mhi_net_newlink(mhi_dev, ndev); if (err) { free_netdev(ndev); return err; } return 0; } static void mhi_net_remove(struct mhi_device *mhi_dev) { struct mhi_net_dev *mhi_netdev = dev_get_drvdata(&mhi_dev->dev); mhi_net_dellink(mhi_dev, mhi_netdev->ndev); } static const struct mhi_device_info mhi_hwip0 = { .netname = "mhi_hwip%d", }; static const struct mhi_device_info mhi_swip0 = { .netname = "mhi_swip%d", }; static const struct mhi_device_id mhi_net_id_table[] = { /* Hardware accelerated data PATH (to modem IPA), protocol agnostic */ { .chan = 
"IP_HW0", .driver_data = (kernel_ulong_t)&mhi_hwip0 }, /* Software data PATH (to modem CPU) */ { .chan = "IP_SW0", .driver_data = (kernel_ulong_t)&mhi_swip0 }, {} }; MODULE_DEVICE_TABLE(mhi, mhi_net_id_table); static struct mhi_driver mhi_net_driver = { .probe = mhi_net_probe, .remove = mhi_net_remove, .dl_xfer_cb = mhi_net_dl_callback, .ul_xfer_cb = mhi_net_ul_callback, .id_table = mhi_net_id_table, .driver = { .name = "mhi_net", }, }; module_mhi_driver(mhi_net_driver); MODULE_AUTHOR("Loic Poulain <[email protected]>"); MODULE_DESCRIPTION("Network over MHI"); MODULE_LICENSE("GPL v2");
linux-master
drivers/net/mhi_net.c
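
A note on the receive path above: mhi_net_dl_callback() guesses the L3 protocol of each completed RX buffer from the IP version nibble of the first payload byte (0x4x selects IPv4, 0x6x selects IPv6, anything else is treated as ETH_P_MAP, i.e. QMAP-multiplexed traffic). The standalone sketch below reproduces only that classification so it can be compiled and run outside the kernel; classify_l3() and the L3_* constants are illustrative names, with values chosen to mirror ETH_P_IP/ETH_P_IPV6/ETH_P_MAP.

#include <stdint.h>
#include <stdio.h>

/* Stand-ins for ETH_P_IP / ETH_P_IPV6 / ETH_P_MAP (illustrative only). */
enum l3_proto { L3_IPV4 = 0x0800, L3_IPV6 = 0x86DD, L3_MAP = 0x00F9 };

/* Mirror of the protocol guess in mhi_net_dl_callback(): inspect the
 * IP version nibble of the first payload byte.
 */
static enum l3_proto classify_l3(const uint8_t *data, size_t len)
{
	if (len == 0)
		return L3_MAP;		/* nothing to inspect, assume QMAP */

	switch (data[0] & 0xf0) {
	case 0x40:
		return L3_IPV4;		/* IPv4: version nibble is 4 */
	case 0x60:
		return L3_IPV6;		/* IPv6: version nibble is 6 */
	default:
		return L3_MAP;		/* multiplexed/raw QMAP traffic */
	}
}

int main(void)
{
	const uint8_t v4_hdr[] = { 0x45, 0x00 };	/* IPv4, IHL = 5 */
	const uint8_t v6_hdr[] = { 0x60, 0x00 };	/* IPv6 */

	printf("0x%04x\n", (unsigned int)classify_l3(v4_hdr, sizeof(v4_hdr)));
	printf("0x%04x\n", (unsigned int)classify_l3(v6_hdr, sizeof(v6_hdr)));
	return 0;
}

Running the sketch prints 0x0800 and 0x86dd for the two sample headers, matching the driver's htons(ETH_P_IP)/htons(ETH_P_IPV6) assignments.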
// SPDX-License-Identifier: GPL-2.0-only
#include <linux/ethtool.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/netlink.h>
#include <net/net_namespace.h>
#include <linux/if_arp.h>
#include <net/rtnetlink.h>

static netdev_tx_t nlmon_xmit(struct sk_buff *skb, struct net_device *dev)
{
	dev_lstats_add(dev, skb->len);
	dev_kfree_skb(skb);
	return NETDEV_TX_OK;
}

static int nlmon_dev_init(struct net_device *dev)
{
	dev->lstats = netdev_alloc_pcpu_stats(struct pcpu_lstats);
	return dev->lstats == NULL ? -ENOMEM : 0;
}

static void nlmon_dev_uninit(struct net_device *dev)
{
	free_percpu(dev->lstats);
}

struct nlmon {
	struct netlink_tap nt;
};

static int nlmon_open(struct net_device *dev)
{
	struct nlmon *nlmon = netdev_priv(dev);

	nlmon->nt.dev = dev;
	nlmon->nt.module = THIS_MODULE;
	return netlink_add_tap(&nlmon->nt);
}

static int nlmon_close(struct net_device *dev)
{
	struct nlmon *nlmon = netdev_priv(dev);

	return netlink_remove_tap(&nlmon->nt);
}

static void nlmon_get_stats64(struct net_device *dev,
			      struct rtnl_link_stats64 *stats)
{
	u64 packets, bytes;

	dev_lstats_read(dev, &packets, &bytes);

	stats->rx_packets = packets;
	stats->tx_packets = 0;

	stats->rx_bytes = bytes;
	stats->tx_bytes = 0;
}

static u32 always_on(struct net_device *dev)
{
	return 1;
}

static const struct ethtool_ops nlmon_ethtool_ops = {
	.get_link = always_on,
};

static const struct net_device_ops nlmon_ops = {
	.ndo_init = nlmon_dev_init,
	.ndo_uninit = nlmon_dev_uninit,
	.ndo_open = nlmon_open,
	.ndo_stop = nlmon_close,
	.ndo_start_xmit = nlmon_xmit,
	.ndo_get_stats64 = nlmon_get_stats64,
};

static void nlmon_setup(struct net_device *dev)
{
	dev->type = ARPHRD_NETLINK;
	dev->priv_flags |= IFF_NO_QUEUE;

	dev->netdev_ops = &nlmon_ops;
	dev->ethtool_ops = &nlmon_ethtool_ops;
	dev->needs_free_netdev = true;

	dev->features = NETIF_F_SG | NETIF_F_FRAGLIST |
			NETIF_F_HIGHDMA | NETIF_F_LLTX;
	dev->flags = IFF_NOARP;

	/* That's rather a softlimit here, which, of course,
	 * can be altered. Not a real MTU, but what is to be
	 * expected in most cases.
	 */
	dev->mtu = NLMSG_GOODSIZE;
	dev->min_mtu = sizeof(struct nlmsghdr);
}

static int nlmon_validate(struct nlattr *tb[], struct nlattr *data[],
			  struct netlink_ext_ack *extack)
{
	if (tb[IFLA_ADDRESS])
		return -EINVAL;

	return 0;
}

static struct rtnl_link_ops nlmon_link_ops __read_mostly = {
	.kind = "nlmon",
	.priv_size = sizeof(struct nlmon),
	.setup = nlmon_setup,
	.validate = nlmon_validate,
};

static __init int nlmon_register(void)
{
	return rtnl_link_register(&nlmon_link_ops);
}

static __exit void nlmon_unregister(void)
{
	rtnl_link_unregister(&nlmon_link_ops);
}

module_init(nlmon_register);
module_exit(nlmon_unregister);

MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Daniel Borkmann <[email protected]>");
MODULE_AUTHOR("Mathieu Geli <[email protected]>");
MODULE_DESCRIPTION("Netlink monitoring device");
MODULE_ALIAS_RTNL_LINK("nlmon");
linux-master
drivers/net/nlmon.c
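
nlmon above accounts every tapped netlink message through dev_lstats_add() into per-CPU pcpu_lstats, and nlmon_get_stats64() folds those per-CPU counters into a single rtnl_link_stats64 (the tx fields stay zero because the device never really transmits). The following user-space sketch shows the same fold with a fixed array standing in for per-CPU storage; struct pcpu_counter, lstats_read_all() and NR_CPUS are invented for illustration and are not the kernel's definitions.

#include <stdint.h>
#include <stdio.h>

#define NR_CPUS 4	/* illustrative; the kernel walks for_each_possible_cpu() */

/* Stand-in for struct pcpu_lstats: one packet/byte pair per CPU. */
struct pcpu_counter {
	uint64_t packets;
	uint64_t bytes;
};

/* Roughly what dev_lstats_read() does: sum every per-CPU pair.
 * The kernel additionally wraps each read in u64_stats_sync to get a
 * consistent snapshot on 32-bit machines; omitted here for brevity.
 */
static void lstats_read_all(const struct pcpu_counter *percpu,
			    uint64_t *packets, uint64_t *bytes)
{
	*packets = 0;
	*bytes = 0;
	for (int cpu = 0; cpu < NR_CPUS; cpu++) {
		*packets += percpu[cpu].packets;
		*bytes += percpu[cpu].bytes;
	}
}

int main(void)
{
	struct pcpu_counter stats[NR_CPUS] = {
		{ .packets = 10, .bytes = 1200 },
		{ .packets = 3,  .bytes = 240 },
	};
	uint64_t pkts, bytes;

	lstats_read_all(stats, &pkts, &bytes);
	printf("rx_packets=%llu rx_bytes=%llu\n",
	       (unsigned long long)pkts, (unsigned long long)bytes);
	return 0;
}

Keeping the hot path to a per-CPU increment and paying the summation cost only when statistics are read is the same design choice the driver makes.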
// SPDX-License-Identifier: GPL-2.0-or-later /* * INET An implementation of the TCP/IP protocol suite for the LINUX * operating system. INET is implemented using the BSD Socket * interface as the means of communication with the user level. * * Holds initial configuration information for devices. * * Version: @(#)Space.c 1.0.7 08/12/93 * * Authors: Ross Biro * Fred N. van Kempen, <[email protected]> * Donald J. Becker, <[email protected]> * * Changelog: * Stephen Hemminger (09/2003) * - get rid of pre-linked dev list, dynamic device allocation * Paul Gortmaker (03/2002) * - struct init cleanup, enable multiple ISA autoprobes. * Arnaldo Carvalho de Melo <[email protected]> - 09/1999 * - fix sbni: s/device/net_device/ * Paul Gortmaker (06/98): * - sort probes in a sane way, make sure all (safe) probes * get run once & failed autoprobes don't autoprobe again. */ #include <linux/netdevice.h> #include <linux/etherdevice.h> #include <linux/errno.h> #include <linux/init.h> #include <linux/netlink.h> #include <net/Space.h> /* * This structure holds boot-time configured netdevice settings. They * are then used in the device probing. */ struct netdev_boot_setup { char name[IFNAMSIZ]; struct ifmap map; }; #define NETDEV_BOOT_SETUP_MAX 8 /****************************************************************************** * * Device Boot-time Settings Routines * ******************************************************************************/ /* Boot time configuration table */ static struct netdev_boot_setup dev_boot_setup[NETDEV_BOOT_SETUP_MAX]; /** * netdev_boot_setup_add - add new setup entry * @name: name of the device * @map: configured settings for the device * * Adds new setup entry to the dev_boot_setup list. The function * returns 0 on error and 1 on success. This is a generic routine to * all netdevices. */ static int netdev_boot_setup_add(char *name, struct ifmap *map) { struct netdev_boot_setup *s; int i; s = dev_boot_setup; for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++) { if (s[i].name[0] == '\0' || s[i].name[0] == ' ') { memset(s[i].name, 0, sizeof(s[i].name)); strscpy(s[i].name, name, IFNAMSIZ); memcpy(&s[i].map, map, sizeof(s[i].map)); break; } } return i >= NETDEV_BOOT_SETUP_MAX ? 0 : 1; } /** * netdev_boot_setup_check - check boot time settings * @dev: the netdevice * * Check boot time settings for the device. * The found settings are set for the device to be used * later in the device probing. * Returns 0 if no settings found, 1 if they are. */ int netdev_boot_setup_check(struct net_device *dev) { struct netdev_boot_setup *s = dev_boot_setup; int i; for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++) { if (s[i].name[0] != '\0' && s[i].name[0] != ' ' && !strcmp(dev->name, s[i].name)) { dev->irq = s[i].map.irq; dev->base_addr = s[i].map.base_addr; dev->mem_start = s[i].map.mem_start; dev->mem_end = s[i].map.mem_end; return 1; } } return 0; } EXPORT_SYMBOL(netdev_boot_setup_check); /** * netdev_boot_base - get address from boot time settings * @prefix: prefix for network device * @unit: id for network device * * Check boot time settings for the base address of device. * The found settings are set for the device to be used * later in the device probing. * Returns 0 if no settings found. 
*/ static unsigned long netdev_boot_base(const char *prefix, int unit) { const struct netdev_boot_setup *s = dev_boot_setup; char name[IFNAMSIZ]; int i; sprintf(name, "%s%d", prefix, unit); /* * If device already registered then return base of 1 * to indicate not to probe for this interface */ if (__dev_get_by_name(&init_net, name)) return 1; for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++) if (!strcmp(name, s[i].name)) return s[i].map.base_addr; return 0; } /* * Saves at boot time configured settings for any netdevice. */ static int __init netdev_boot_setup(char *str) { int ints[5]; struct ifmap map; str = get_options(str, ARRAY_SIZE(ints), ints); if (!str || !*str) return 0; /* Save settings */ memset(&map, 0, sizeof(map)); if (ints[0] > 0) map.irq = ints[1]; if (ints[0] > 1) map.base_addr = ints[2]; if (ints[0] > 2) map.mem_start = ints[3]; if (ints[0] > 3) map.mem_end = ints[4]; /* Add new entry to the list */ return netdev_boot_setup_add(str, &map); } __setup("netdev=", netdev_boot_setup); static int __init ether_boot_setup(char *str) { return netdev_boot_setup(str); } __setup("ether=", ether_boot_setup); /* A unified ethernet device probe. This is the easiest way to have every * ethernet adaptor have the name "eth[0123...]". */ struct devprobe2 { struct net_device *(*probe)(int unit); int status; /* non-zero if autoprobe has failed */ }; static int __init probe_list2(int unit, struct devprobe2 *p, int autoprobe) { struct net_device *dev; for (; p->probe; p++) { if (autoprobe && p->status) continue; dev = p->probe(unit); if (!IS_ERR(dev)) return 0; if (autoprobe) p->status = PTR_ERR(dev); } return -ENODEV; } /* ISA probes that touch addresses < 0x400 (including those that also * look for EISA/PCI cards in addition to ISA cards). */ static struct devprobe2 isa_probes[] __initdata = { #ifdef CONFIG_3C515 {tc515_probe, 0}, #endif #ifdef CONFIG_ULTRA {ultra_probe, 0}, #endif #ifdef CONFIG_WD80x3 {wd_probe, 0}, #endif #if defined(CONFIG_NE2000) /* ISA (use ne2k-pci for PCI cards) */ {ne_probe, 0}, #endif #ifdef CONFIG_LANCE /* ISA/VLB (use pcnet32 for PCI cards) */ {lance_probe, 0}, #endif #ifdef CONFIG_SMC9194 {smc_init, 0}, #endif #ifdef CONFIG_CS89x0_ISA {cs89x0_probe, 0}, #endif {NULL, 0}, }; /* Unified ethernet device probe, segmented per architecture and * per bus interface. This drives the legacy devices only for now. */ static void __init ethif_probe2(int unit) { unsigned long base_addr = netdev_boot_base("eth", unit); if (base_addr == 1) return; probe_list2(unit, isa_probes, base_addr == 0); } /* Statically configured drivers -- order matters here. */ static int __init net_olddevs_init(void) { int num; for (num = 0; num < 8; ++num) ethif_probe2(num); #ifdef CONFIG_COPS cops_probe(0); cops_probe(1); cops_probe(2); #endif return 0; } device_initcall(net_olddevs_init);
linux-master
drivers/net/Space.c
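
Space.c stores `netdev=irq,base_addr,mem_start,mem_end,name` boot arguments via get_options() and replays them later during legacy ISA probing. As a hedged approximation of that parsing step (the real get_options() in lib/cmdline.c also handles ranges and longer integer lists), here is a standalone parser; struct boot_map and parse_netdev_arg() are invented names for the sketch, not kernel interfaces.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Invented stand-in for the ifmap filled by netdev_boot_setup(). */
struct boot_map {
	unsigned long irq;
	unsigned long base_addr;
	unsigned long mem_start;
	unsigned long mem_end;
	char name[16];
};

/* Parse "irq,base,mem_start,mem_end,name" the way __setup("netdev=")
 * consumes it: leading integers fill the map, the trailing string is the
 * interface name. Returns 0 on success, -1 if no name is left over.
 */
static int parse_netdev_arg(const char *arg, struct boot_map *map)
{
	unsigned long vals[4] = { 0 };
	int n = 0;
	char *end;

	memset(map, 0, sizeof(*map));

	while (n < 4) {
		unsigned long v = strtoul(arg, &end, 0);

		if (end == arg || *end != ',')
			break;			/* no longer "<number>," */
		vals[n++] = v;
		arg = end + 1;
	}

	if (*arg == '\0')
		return -1;			/* no device name given */

	map->irq = vals[0];
	map->base_addr = vals[1];
	map->mem_start = vals[2];
	map->mem_end = vals[3];
	snprintf(map->name, sizeof(map->name), "%s", arg);
	return 0;
}

int main(void)
{
	struct boot_map map;

	if (!parse_netdev_arg("9,0x300,0,0,eth0", &map))
		printf("%s: irq=%lu base=0x%lx\n",
		       map.name, map.irq, map.base_addr);
	return 0;
}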
/* * Virtual network driver for conversing with remote driver backends. * * Copyright (c) 2002-2005, K A Fraser * Copyright (c) 2005, XenSource Ltd * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation; or, when distributed * separately from the Linux kernel or incorporated into other * software packages, subject to the following license: * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this source file (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, modify, * merge, publish, distribute, sublicense, and/or sell copies of the Software, * and to permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/module.h> #include <linux/kernel.h> #include <linux/netdevice.h> #include <linux/etherdevice.h> #include <linux/skbuff.h> #include <linux/ethtool.h> #include <linux/if_ether.h> #include <net/tcp.h> #include <linux/udp.h> #include <linux/moduleparam.h> #include <linux/mm.h> #include <linux/slab.h> #include <net/ip.h> #include <linux/bpf.h> #include <net/page_pool/types.h> #include <linux/bpf_trace.h> #include <xen/xen.h> #include <xen/xenbus.h> #include <xen/events.h> #include <xen/page.h> #include <xen/platform_pci.h> #include <xen/grant_table.h> #include <xen/interface/io/netif.h> #include <xen/interface/memory.h> #include <xen/interface/grant_table.h> /* Module parameters */ #define MAX_QUEUES_DEFAULT 8 static unsigned int xennet_max_queues; module_param_named(max_queues, xennet_max_queues, uint, 0644); MODULE_PARM_DESC(max_queues, "Maximum number of queues per virtual interface"); static bool __read_mostly xennet_trusted = true; module_param_named(trusted, xennet_trusted, bool, 0644); MODULE_PARM_DESC(trusted, "Is the backend trusted"); #define XENNET_TIMEOUT (5 * HZ) static const struct ethtool_ops xennet_ethtool_ops; struct netfront_cb { int pull_to; }; #define NETFRONT_SKB_CB(skb) ((struct netfront_cb *)((skb)->cb)) #define RX_COPY_THRESHOLD 256 #define NET_TX_RING_SIZE __CONST_RING_SIZE(xen_netif_tx, XEN_PAGE_SIZE) #define NET_RX_RING_SIZE __CONST_RING_SIZE(xen_netif_rx, XEN_PAGE_SIZE) /* Minimum number of Rx slots (includes slot for GSO metadata). 
*/ #define NET_RX_SLOTS_MIN (XEN_NETIF_NR_SLOTS_MIN + 1) /* Queue name is interface name with "-qNNN" appended */ #define QUEUE_NAME_SIZE (IFNAMSIZ + 6) /* IRQ name is queue name with "-tx" or "-rx" appended */ #define IRQ_NAME_SIZE (QUEUE_NAME_SIZE + 3) static DECLARE_WAIT_QUEUE_HEAD(module_wq); struct netfront_stats { u64 packets; u64 bytes; struct u64_stats_sync syncp; }; struct netfront_info; struct netfront_queue { unsigned int id; /* Queue ID, 0-based */ char name[QUEUE_NAME_SIZE]; /* DEVNAME-qN */ struct netfront_info *info; struct bpf_prog __rcu *xdp_prog; struct napi_struct napi; /* Split event channels support, tx_* == rx_* when using * single event channel. */ unsigned int tx_evtchn, rx_evtchn; unsigned int tx_irq, rx_irq; /* Only used when split event channels support is enabled */ char tx_irq_name[IRQ_NAME_SIZE]; /* DEVNAME-qN-tx */ char rx_irq_name[IRQ_NAME_SIZE]; /* DEVNAME-qN-rx */ spinlock_t tx_lock; struct xen_netif_tx_front_ring tx; int tx_ring_ref; /* * {tx,rx}_skbs store outstanding skbuffs. Free tx_skb entries * are linked from tx_skb_freelist through tx_link. */ struct sk_buff *tx_skbs[NET_TX_RING_SIZE]; unsigned short tx_link[NET_TX_RING_SIZE]; #define TX_LINK_NONE 0xffff #define TX_PENDING 0xfffe grant_ref_t gref_tx_head; grant_ref_t grant_tx_ref[NET_TX_RING_SIZE]; struct page *grant_tx_page[NET_TX_RING_SIZE]; unsigned tx_skb_freelist; unsigned int tx_pend_queue; spinlock_t rx_lock ____cacheline_aligned_in_smp; struct xen_netif_rx_front_ring rx; int rx_ring_ref; struct timer_list rx_refill_timer; struct sk_buff *rx_skbs[NET_RX_RING_SIZE]; grant_ref_t gref_rx_head; grant_ref_t grant_rx_ref[NET_RX_RING_SIZE]; unsigned int rx_rsp_unconsumed; spinlock_t rx_cons_lock; struct page_pool *page_pool; struct xdp_rxq_info xdp_rxq; }; struct netfront_info { struct list_head list; struct net_device *netdev; struct xenbus_device *xbdev; /* Multi-queue support */ struct netfront_queue *queues; /* Statistics */ struct netfront_stats __percpu *rx_stats; struct netfront_stats __percpu *tx_stats; /* XDP state */ bool netback_has_xdp_headroom; bool netfront_xdp_enabled; /* Is device behaving sane? */ bool broken; /* Should skbs be bounced into a zeroed buffer? */ bool bounce; atomic_t rx_gso_checksum_fixup; }; struct netfront_rx_info { struct xen_netif_rx_response rx; struct xen_netif_extra_info extras[XEN_NETIF_EXTRA_TYPE_MAX - 1]; }; /* * Access macros for acquiring freeing slots in tx_skbs[]. 
*/ static void add_id_to_list(unsigned *head, unsigned short *list, unsigned short id) { list[id] = *head; *head = id; } static unsigned short get_id_from_list(unsigned *head, unsigned short *list) { unsigned int id = *head; if (id != TX_LINK_NONE) { *head = list[id]; list[id] = TX_LINK_NONE; } return id; } static int xennet_rxidx(RING_IDX idx) { return idx & (NET_RX_RING_SIZE - 1); } static struct sk_buff *xennet_get_rx_skb(struct netfront_queue *queue, RING_IDX ri) { int i = xennet_rxidx(ri); struct sk_buff *skb = queue->rx_skbs[i]; queue->rx_skbs[i] = NULL; return skb; } static grant_ref_t xennet_get_rx_ref(struct netfront_queue *queue, RING_IDX ri) { int i = xennet_rxidx(ri); grant_ref_t ref = queue->grant_rx_ref[i]; queue->grant_rx_ref[i] = INVALID_GRANT_REF; return ref; } #ifdef CONFIG_SYSFS static const struct attribute_group xennet_dev_group; #endif static bool xennet_can_sg(struct net_device *dev) { return dev->features & NETIF_F_SG; } static void rx_refill_timeout(struct timer_list *t) { struct netfront_queue *queue = from_timer(queue, t, rx_refill_timer); napi_schedule(&queue->napi); } static int netfront_tx_slot_available(struct netfront_queue *queue) { return (queue->tx.req_prod_pvt - queue->tx.rsp_cons) < (NET_TX_RING_SIZE - XEN_NETIF_NR_SLOTS_MIN - 1); } static void xennet_maybe_wake_tx(struct netfront_queue *queue) { struct net_device *dev = queue->info->netdev; struct netdev_queue *dev_queue = netdev_get_tx_queue(dev, queue->id); if (unlikely(netif_tx_queue_stopped(dev_queue)) && netfront_tx_slot_available(queue) && likely(netif_running(dev))) netif_tx_wake_queue(netdev_get_tx_queue(dev, queue->id)); } static struct sk_buff *xennet_alloc_one_rx_buffer(struct netfront_queue *queue) { struct sk_buff *skb; struct page *page; skb = __netdev_alloc_skb(queue->info->netdev, RX_COPY_THRESHOLD + NET_IP_ALIGN, GFP_ATOMIC | __GFP_NOWARN); if (unlikely(!skb)) return NULL; page = page_pool_alloc_pages(queue->page_pool, GFP_ATOMIC | __GFP_NOWARN | __GFP_ZERO); if (unlikely(!page)) { kfree_skb(skb); return NULL; } skb_add_rx_frag(skb, 0, page, 0, 0, PAGE_SIZE); /* Align ip header to a 16 bytes boundary */ skb_reserve(skb, NET_IP_ALIGN); skb->dev = queue->info->netdev; return skb; } static void xennet_alloc_rx_buffers(struct netfront_queue *queue) { RING_IDX req_prod = queue->rx.req_prod_pvt; int notify; int err = 0; if (unlikely(!netif_carrier_ok(queue->info->netdev))) return; for (req_prod = queue->rx.req_prod_pvt; req_prod - queue->rx.rsp_cons < NET_RX_RING_SIZE; req_prod++) { struct sk_buff *skb; unsigned short id; grant_ref_t ref; struct page *page; struct xen_netif_rx_request *req; skb = xennet_alloc_one_rx_buffer(queue); if (!skb) { err = -ENOMEM; break; } id = xennet_rxidx(req_prod); BUG_ON(queue->rx_skbs[id]); queue->rx_skbs[id] = skb; ref = gnttab_claim_grant_reference(&queue->gref_rx_head); WARN_ON_ONCE(IS_ERR_VALUE((unsigned long)(int)ref)); queue->grant_rx_ref[id] = ref; page = skb_frag_page(&skb_shinfo(skb)->frags[0]); req = RING_GET_REQUEST(&queue->rx, req_prod); gnttab_page_grant_foreign_access_ref_one(ref, queue->info->xbdev->otherend_id, page, 0); req->id = id; req->gref = ref; } queue->rx.req_prod_pvt = req_prod; /* Try again later if there are not enough requests or skb allocation * failed. * Enough requests is quantified as the sum of newly created slots and * the unconsumed slots at the backend. 
*/ if (req_prod - queue->rx.rsp_cons < NET_RX_SLOTS_MIN || unlikely(err)) { mod_timer(&queue->rx_refill_timer, jiffies + (HZ/10)); return; } RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&queue->rx, notify); if (notify) notify_remote_via_irq(queue->rx_irq); } static int xennet_open(struct net_device *dev) { struct netfront_info *np = netdev_priv(dev); unsigned int num_queues = dev->real_num_tx_queues; unsigned int i = 0; struct netfront_queue *queue = NULL; if (!np->queues || np->broken) return -ENODEV; for (i = 0; i < num_queues; ++i) { queue = &np->queues[i]; napi_enable(&queue->napi); spin_lock_bh(&queue->rx_lock); if (netif_carrier_ok(dev)) { xennet_alloc_rx_buffers(queue); queue->rx.sring->rsp_event = queue->rx.rsp_cons + 1; if (RING_HAS_UNCONSUMED_RESPONSES(&queue->rx)) napi_schedule(&queue->napi); } spin_unlock_bh(&queue->rx_lock); } netif_tx_start_all_queues(dev); return 0; } static bool xennet_tx_buf_gc(struct netfront_queue *queue) { RING_IDX cons, prod; unsigned short id; struct sk_buff *skb; bool more_to_do; bool work_done = false; const struct device *dev = &queue->info->netdev->dev; BUG_ON(!netif_carrier_ok(queue->info->netdev)); do { prod = queue->tx.sring->rsp_prod; if (RING_RESPONSE_PROD_OVERFLOW(&queue->tx, prod)) { dev_alert(dev, "Illegal number of responses %u\n", prod - queue->tx.rsp_cons); goto err; } rmb(); /* Ensure we see responses up to 'rp'. */ for (cons = queue->tx.rsp_cons; cons != prod; cons++) { struct xen_netif_tx_response txrsp; work_done = true; RING_COPY_RESPONSE(&queue->tx, cons, &txrsp); if (txrsp.status == XEN_NETIF_RSP_NULL) continue; id = txrsp.id; if (id >= RING_SIZE(&queue->tx)) { dev_alert(dev, "Response has incorrect id (%u)\n", id); goto err; } if (queue->tx_link[id] != TX_PENDING) { dev_alert(dev, "Response for inactive request\n"); goto err; } queue->tx_link[id] = TX_LINK_NONE; skb = queue->tx_skbs[id]; queue->tx_skbs[id] = NULL; if (unlikely(!gnttab_end_foreign_access_ref( queue->grant_tx_ref[id]))) { dev_alert(dev, "Grant still in use by backend domain\n"); goto err; } gnttab_release_grant_reference( &queue->gref_tx_head, queue->grant_tx_ref[id]); queue->grant_tx_ref[id] = INVALID_GRANT_REF; queue->grant_tx_page[id] = NULL; add_id_to_list(&queue->tx_skb_freelist, queue->tx_link, id); dev_kfree_skb_irq(skb); } queue->tx.rsp_cons = prod; RING_FINAL_CHECK_FOR_RESPONSES(&queue->tx, more_to_do); } while (more_to_do); xennet_maybe_wake_tx(queue); return work_done; err: queue->info->broken = true; dev_alert(dev, "Disabled for further use\n"); return work_done; } struct xennet_gnttab_make_txreq { struct netfront_queue *queue; struct sk_buff *skb; struct page *page; struct xen_netif_tx_request *tx; /* Last request on ring page */ struct xen_netif_tx_request tx_local; /* Last request local copy*/ unsigned int size; }; static void xennet_tx_setup_grant(unsigned long gfn, unsigned int offset, unsigned int len, void *data) { struct xennet_gnttab_make_txreq *info = data; unsigned int id; struct xen_netif_tx_request *tx; grant_ref_t ref; /* convenient aliases */ struct page *page = info->page; struct netfront_queue *queue = info->queue; struct sk_buff *skb = info->skb; id = get_id_from_list(&queue->tx_skb_freelist, queue->tx_link); tx = RING_GET_REQUEST(&queue->tx, queue->tx.req_prod_pvt++); ref = gnttab_claim_grant_reference(&queue->gref_tx_head); WARN_ON_ONCE(IS_ERR_VALUE((unsigned long)(int)ref)); gnttab_grant_foreign_access_ref(ref, queue->info->xbdev->otherend_id, gfn, GNTMAP_readonly); queue->tx_skbs[id] = skb; queue->grant_tx_page[id] = page; 
queue->grant_tx_ref[id] = ref; info->tx_local.id = id; info->tx_local.gref = ref; info->tx_local.offset = offset; info->tx_local.size = len; info->tx_local.flags = 0; *tx = info->tx_local; /* * Put the request in the pending queue, it will be set to be pending * when the producer index is about to be raised. */ add_id_to_list(&queue->tx_pend_queue, queue->tx_link, id); info->tx = tx; info->size += info->tx_local.size; } static struct xen_netif_tx_request *xennet_make_first_txreq( struct xennet_gnttab_make_txreq *info, unsigned int offset, unsigned int len) { info->size = 0; gnttab_for_one_grant(info->page, offset, len, xennet_tx_setup_grant, info); return info->tx; } static void xennet_make_one_txreq(unsigned long gfn, unsigned int offset, unsigned int len, void *data) { struct xennet_gnttab_make_txreq *info = data; info->tx->flags |= XEN_NETTXF_more_data; skb_get(info->skb); xennet_tx_setup_grant(gfn, offset, len, data); } static void xennet_make_txreqs( struct xennet_gnttab_make_txreq *info, struct page *page, unsigned int offset, unsigned int len) { /* Skip unused frames from start of page */ page += offset >> PAGE_SHIFT; offset &= ~PAGE_MASK; while (len) { info->page = page; info->size = 0; gnttab_foreach_grant_in_range(page, offset, len, xennet_make_one_txreq, info); page++; offset = 0; len -= info->size; } } /* * Count how many ring slots are required to send this skb. Each frag * might be a compound page. */ static int xennet_count_skb_slots(struct sk_buff *skb) { int i, frags = skb_shinfo(skb)->nr_frags; int slots; slots = gnttab_count_grant(offset_in_page(skb->data), skb_headlen(skb)); for (i = 0; i < frags; i++) { skb_frag_t *frag = skb_shinfo(skb)->frags + i; unsigned long size = skb_frag_size(frag); unsigned long offset = skb_frag_off(frag); /* Skip unused frames from start of page */ offset &= ~PAGE_MASK; slots += gnttab_count_grant(offset, size); } return slots; } static u16 xennet_select_queue(struct net_device *dev, struct sk_buff *skb, struct net_device *sb_dev) { unsigned int num_queues = dev->real_num_tx_queues; u32 hash; u16 queue_idx; /* First, check if there is only one queue */ if (num_queues == 1) { queue_idx = 0; } else { hash = skb_get_hash(skb); queue_idx = hash % num_queues; } return queue_idx; } static void xennet_mark_tx_pending(struct netfront_queue *queue) { unsigned int i; while ((i = get_id_from_list(&queue->tx_pend_queue, queue->tx_link)) != TX_LINK_NONE) queue->tx_link[i] = TX_PENDING; } static int xennet_xdp_xmit_one(struct net_device *dev, struct netfront_queue *queue, struct xdp_frame *xdpf) { struct netfront_info *np = netdev_priv(dev); struct netfront_stats *tx_stats = this_cpu_ptr(np->tx_stats); struct xennet_gnttab_make_txreq info = { .queue = queue, .skb = NULL, .page = virt_to_page(xdpf->data), }; int notify; xennet_make_first_txreq(&info, offset_in_page(xdpf->data), xdpf->len); xennet_mark_tx_pending(queue); RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&queue->tx, notify); if (notify) notify_remote_via_irq(queue->tx_irq); u64_stats_update_begin(&tx_stats->syncp); tx_stats->bytes += xdpf->len; tx_stats->packets++; u64_stats_update_end(&tx_stats->syncp); xennet_tx_buf_gc(queue); return 0; } static int xennet_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames, u32 flags) { unsigned int num_queues = dev->real_num_tx_queues; struct netfront_info *np = netdev_priv(dev); struct netfront_queue *queue = NULL; unsigned long irq_flags; int nxmit = 0; int i; if (unlikely(np->broken)) return -ENODEV; if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK)) return 
-EINVAL; queue = &np->queues[smp_processor_id() % num_queues]; spin_lock_irqsave(&queue->tx_lock, irq_flags); for (i = 0; i < n; i++) { struct xdp_frame *xdpf = frames[i]; if (!xdpf) continue; if (xennet_xdp_xmit_one(dev, queue, xdpf)) break; nxmit++; } spin_unlock_irqrestore(&queue->tx_lock, irq_flags); return nxmit; } static struct sk_buff *bounce_skb(const struct sk_buff *skb) { unsigned int headerlen = skb_headroom(skb); /* Align size to allocate full pages and avoid contiguous data leaks */ unsigned int size = ALIGN(skb_end_offset(skb) + skb->data_len, XEN_PAGE_SIZE); struct sk_buff *n = alloc_skb(size, GFP_ATOMIC | __GFP_ZERO); if (!n) return NULL; if (!IS_ALIGNED((uintptr_t)n->head, XEN_PAGE_SIZE)) { WARN_ONCE(1, "misaligned skb allocated\n"); kfree_skb(n); return NULL; } /* Set the data pointer */ skb_reserve(n, headerlen); /* Set the tail pointer and length */ skb_put(n, skb->len); BUG_ON(skb_copy_bits(skb, -headerlen, n->head, headerlen + skb->len)); skb_copy_header(n, skb); return n; } #define MAX_XEN_SKB_FRAGS (65536 / XEN_PAGE_SIZE + 1) static netdev_tx_t xennet_start_xmit(struct sk_buff *skb, struct net_device *dev) { struct netfront_info *np = netdev_priv(dev); struct netfront_stats *tx_stats = this_cpu_ptr(np->tx_stats); struct xen_netif_tx_request *first_tx; unsigned int i; int notify; int slots; struct page *page; unsigned int offset; unsigned int len; unsigned long flags; struct netfront_queue *queue = NULL; struct xennet_gnttab_make_txreq info = { }; unsigned int num_queues = dev->real_num_tx_queues; u16 queue_index; struct sk_buff *nskb; /* Drop the packet if no queues are set up */ if (num_queues < 1) goto drop; if (unlikely(np->broken)) goto drop; /* Determine which queue to transmit this SKB on */ queue_index = skb_get_queue_mapping(skb); queue = &np->queues[queue_index]; /* If skb->len is too big for wire format, drop skb and alert * user about misconfiguration. */ if (unlikely(skb->len > XEN_NETIF_MAX_TX_SIZE)) { net_alert_ratelimited( "xennet: skb->len = %u, too big for wire format\n", skb->len); goto drop; } slots = xennet_count_skb_slots(skb); if (unlikely(slots > MAX_XEN_SKB_FRAGS + 1)) { net_dbg_ratelimited("xennet: skb rides the rocket: %d slots, %d bytes\n", slots, skb->len); if (skb_linearize(skb)) goto drop; } page = virt_to_page(skb->data); offset = offset_in_page(skb->data); /* The first req should be at least ETH_HLEN size or the packet will be * dropped by netback. * * If the backend is not trusted bounce all data to zeroed pages to * avoid exposing contiguous data on the granted page not belonging to * the skb. */ if (np->bounce || unlikely(PAGE_SIZE - offset < ETH_HLEN)) { nskb = bounce_skb(skb); if (!nskb) goto drop; dev_consume_skb_any(skb); skb = nskb; page = virt_to_page(skb->data); offset = offset_in_page(skb->data); } len = skb_headlen(skb); spin_lock_irqsave(&queue->tx_lock, flags); if (unlikely(!netif_carrier_ok(dev) || (slots > 1 && !xennet_can_sg(dev)) || netif_needs_gso(skb, netif_skb_features(skb)))) { spin_unlock_irqrestore(&queue->tx_lock, flags); goto drop; } /* First request for the linear area. */ info.queue = queue; info.skb = skb; info.page = page; first_tx = xennet_make_first_txreq(&info, offset, len); offset += info.tx_local.size; if (offset == PAGE_SIZE) { page++; offset = 0; } len -= info.tx_local.size; if (skb->ip_summed == CHECKSUM_PARTIAL) /* local packet? */ first_tx->flags |= XEN_NETTXF_csum_blank | XEN_NETTXF_data_validated; else if (skb->ip_summed == CHECKSUM_UNNECESSARY) /* remote but checksummed. 
*/ first_tx->flags |= XEN_NETTXF_data_validated; /* Optional extra info after the first request. */ if (skb_shinfo(skb)->gso_size) { struct xen_netif_extra_info *gso; gso = (struct xen_netif_extra_info *) RING_GET_REQUEST(&queue->tx, queue->tx.req_prod_pvt++); first_tx->flags |= XEN_NETTXF_extra_info; gso->u.gso.size = skb_shinfo(skb)->gso_size; gso->u.gso.type = (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) ? XEN_NETIF_GSO_TYPE_TCPV6 : XEN_NETIF_GSO_TYPE_TCPV4; gso->u.gso.pad = 0; gso->u.gso.features = 0; gso->type = XEN_NETIF_EXTRA_TYPE_GSO; gso->flags = 0; } /* Requests for the rest of the linear area. */ xennet_make_txreqs(&info, page, offset, len); /* Requests for all the frags. */ for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; xennet_make_txreqs(&info, skb_frag_page(frag), skb_frag_off(frag), skb_frag_size(frag)); } /* First request has the packet length. */ first_tx->size = skb->len; /* timestamp packet in software */ skb_tx_timestamp(skb); xennet_mark_tx_pending(queue); RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&queue->tx, notify); if (notify) notify_remote_via_irq(queue->tx_irq); u64_stats_update_begin(&tx_stats->syncp); tx_stats->bytes += skb->len; tx_stats->packets++; u64_stats_update_end(&tx_stats->syncp); /* Note: It is not safe to access skb after xennet_tx_buf_gc()! */ xennet_tx_buf_gc(queue); if (!netfront_tx_slot_available(queue)) netif_tx_stop_queue(netdev_get_tx_queue(dev, queue->id)); spin_unlock_irqrestore(&queue->tx_lock, flags); return NETDEV_TX_OK; drop: dev->stats.tx_dropped++; dev_kfree_skb_any(skb); return NETDEV_TX_OK; } static int xennet_close(struct net_device *dev) { struct netfront_info *np = netdev_priv(dev); unsigned int num_queues = dev->real_num_tx_queues; unsigned int i; struct netfront_queue *queue; netif_tx_stop_all_queues(np->netdev); for (i = 0; i < num_queues; ++i) { queue = &np->queues[i]; napi_disable(&queue->napi); } return 0; } static void xennet_destroy_queues(struct netfront_info *info) { unsigned int i; for (i = 0; i < info->netdev->real_num_tx_queues; i++) { struct netfront_queue *queue = &info->queues[i]; if (netif_running(info->netdev)) napi_disable(&queue->napi); netif_napi_del(&queue->napi); } kfree(info->queues); info->queues = NULL; } static void xennet_uninit(struct net_device *dev) { struct netfront_info *np = netdev_priv(dev); xennet_destroy_queues(np); } static void xennet_set_rx_rsp_cons(struct netfront_queue *queue, RING_IDX val) { unsigned long flags; spin_lock_irqsave(&queue->rx_cons_lock, flags); queue->rx.rsp_cons = val; queue->rx_rsp_unconsumed = XEN_RING_NR_UNCONSUMED_RESPONSES(&queue->rx); spin_unlock_irqrestore(&queue->rx_cons_lock, flags); } static void xennet_move_rx_slot(struct netfront_queue *queue, struct sk_buff *skb, grant_ref_t ref) { int new = xennet_rxidx(queue->rx.req_prod_pvt); BUG_ON(queue->rx_skbs[new]); queue->rx_skbs[new] = skb; queue->grant_rx_ref[new] = ref; RING_GET_REQUEST(&queue->rx, queue->rx.req_prod_pvt)->id = new; RING_GET_REQUEST(&queue->rx, queue->rx.req_prod_pvt)->gref = ref; queue->rx.req_prod_pvt++; } static int xennet_get_extras(struct netfront_queue *queue, struct xen_netif_extra_info *extras, RING_IDX rp) { struct xen_netif_extra_info extra; struct device *dev = &queue->info->netdev->dev; RING_IDX cons = queue->rx.rsp_cons; int err = 0; do { struct sk_buff *skb; grant_ref_t ref; if (unlikely(cons + 1 == rp)) { if (net_ratelimit()) dev_warn(dev, "Missing extra info\n"); err = -EBADR; break; } RING_COPY_RESPONSE(&queue->rx, ++cons, &extra); if 
(unlikely(!extra.type || extra.type >= XEN_NETIF_EXTRA_TYPE_MAX)) { if (net_ratelimit()) dev_warn(dev, "Invalid extra type: %d\n", extra.type); err = -EINVAL; } else { extras[extra.type - 1] = extra; } skb = xennet_get_rx_skb(queue, cons); ref = xennet_get_rx_ref(queue, cons); xennet_move_rx_slot(queue, skb, ref); } while (extra.flags & XEN_NETIF_EXTRA_FLAG_MORE); xennet_set_rx_rsp_cons(queue, cons); return err; } static u32 xennet_run_xdp(struct netfront_queue *queue, struct page *pdata, struct xen_netif_rx_response *rx, struct bpf_prog *prog, struct xdp_buff *xdp, bool *need_xdp_flush) { struct xdp_frame *xdpf; u32 len = rx->status; u32 act; int err; xdp_init_buff(xdp, XEN_PAGE_SIZE - XDP_PACKET_HEADROOM, &queue->xdp_rxq); xdp_prepare_buff(xdp, page_address(pdata), XDP_PACKET_HEADROOM, len, false); act = bpf_prog_run_xdp(prog, xdp); switch (act) { case XDP_TX: get_page(pdata); xdpf = xdp_convert_buff_to_frame(xdp); err = xennet_xdp_xmit(queue->info->netdev, 1, &xdpf, 0); if (unlikely(!err)) xdp_return_frame_rx_napi(xdpf); else if (unlikely(err < 0)) trace_xdp_exception(queue->info->netdev, prog, act); break; case XDP_REDIRECT: get_page(pdata); err = xdp_do_redirect(queue->info->netdev, xdp, prog); *need_xdp_flush = true; if (unlikely(err)) trace_xdp_exception(queue->info->netdev, prog, act); break; case XDP_PASS: case XDP_DROP: break; case XDP_ABORTED: trace_xdp_exception(queue->info->netdev, prog, act); break; default: bpf_warn_invalid_xdp_action(queue->info->netdev, prog, act); } return act; } static int xennet_get_responses(struct netfront_queue *queue, struct netfront_rx_info *rinfo, RING_IDX rp, struct sk_buff_head *list, bool *need_xdp_flush) { struct xen_netif_rx_response *rx = &rinfo->rx, rx_local; int max = XEN_NETIF_NR_SLOTS_MIN + (rx->status <= RX_COPY_THRESHOLD); RING_IDX cons = queue->rx.rsp_cons; struct sk_buff *skb = xennet_get_rx_skb(queue, cons); struct xen_netif_extra_info *extras = rinfo->extras; grant_ref_t ref = xennet_get_rx_ref(queue, cons); struct device *dev = &queue->info->netdev->dev; struct bpf_prog *xdp_prog; struct xdp_buff xdp; int slots = 1; int err = 0; u32 verdict; if (rx->flags & XEN_NETRXF_extra_info) { err = xennet_get_extras(queue, extras, rp); if (!err) { if (extras[XEN_NETIF_EXTRA_TYPE_XDP - 1].type) { struct xen_netif_extra_info *xdp; xdp = &extras[XEN_NETIF_EXTRA_TYPE_XDP - 1]; rx->offset = xdp->u.xdp.headroom; } } cons = queue->rx.rsp_cons; } for (;;) { /* * This definitely indicates a bug, either in this driver or in * the backend driver. In future this should flag the bad * situation to the system controller to reboot the backend. 
*/ if (ref == INVALID_GRANT_REF) { if (net_ratelimit()) dev_warn(dev, "Bad rx response id %d.\n", rx->id); err = -EINVAL; goto next; } if (unlikely(rx->status < 0 || rx->offset + rx->status > XEN_PAGE_SIZE)) { if (net_ratelimit()) dev_warn(dev, "rx->offset: %u, size: %d\n", rx->offset, rx->status); xennet_move_rx_slot(queue, skb, ref); err = -EINVAL; goto next; } if (!gnttab_end_foreign_access_ref(ref)) { dev_alert(dev, "Grant still in use by backend domain\n"); queue->info->broken = true; dev_alert(dev, "Disabled for further use\n"); return -EINVAL; } gnttab_release_grant_reference(&queue->gref_rx_head, ref); rcu_read_lock(); xdp_prog = rcu_dereference(queue->xdp_prog); if (xdp_prog) { if (!(rx->flags & XEN_NETRXF_more_data)) { /* currently only a single page contains data */ verdict = xennet_run_xdp(queue, skb_frag_page(&skb_shinfo(skb)->frags[0]), rx, xdp_prog, &xdp, need_xdp_flush); if (verdict != XDP_PASS) err = -EINVAL; } else { /* drop the frame */ err = -EINVAL; } } rcu_read_unlock(); __skb_queue_tail(list, skb); next: if (!(rx->flags & XEN_NETRXF_more_data)) break; if (cons + slots == rp) { if (net_ratelimit()) dev_warn(dev, "Need more slots\n"); err = -ENOENT; break; } RING_COPY_RESPONSE(&queue->rx, cons + slots, &rx_local); rx = &rx_local; skb = xennet_get_rx_skb(queue, cons + slots); ref = xennet_get_rx_ref(queue, cons + slots); slots++; } if (unlikely(slots > max)) { if (net_ratelimit()) dev_warn(dev, "Too many slots\n"); err = -E2BIG; } if (unlikely(err)) xennet_set_rx_rsp_cons(queue, cons + slots); return err; } static int xennet_set_skb_gso(struct sk_buff *skb, struct xen_netif_extra_info *gso) { if (!gso->u.gso.size) { if (net_ratelimit()) pr_warn("GSO size must not be zero\n"); return -EINVAL; } if (gso->u.gso.type != XEN_NETIF_GSO_TYPE_TCPV4 && gso->u.gso.type != XEN_NETIF_GSO_TYPE_TCPV6) { if (net_ratelimit()) pr_warn("Bad GSO type %d\n", gso->u.gso.type); return -EINVAL; } skb_shinfo(skb)->gso_size = gso->u.gso.size; skb_shinfo(skb)->gso_type = (gso->u.gso.type == XEN_NETIF_GSO_TYPE_TCPV4) ? SKB_GSO_TCPV4 : SKB_GSO_TCPV6; /* Header must be checked, and gso_segs computed. */ skb_shinfo(skb)->gso_type |= SKB_GSO_DODGY; skb_shinfo(skb)->gso_segs = 0; return 0; } static int xennet_fill_frags(struct netfront_queue *queue, struct sk_buff *skb, struct sk_buff_head *list) { RING_IDX cons = queue->rx.rsp_cons; struct sk_buff *nskb; while ((nskb = __skb_dequeue(list))) { struct xen_netif_rx_response rx; skb_frag_t *nfrag = &skb_shinfo(nskb)->frags[0]; RING_COPY_RESPONSE(&queue->rx, ++cons, &rx); if (skb_shinfo(skb)->nr_frags == MAX_SKB_FRAGS) { unsigned int pull_to = NETFRONT_SKB_CB(skb)->pull_to; BUG_ON(pull_to < skb_headlen(skb)); __pskb_pull_tail(skb, pull_to - skb_headlen(skb)); } if (unlikely(skb_shinfo(skb)->nr_frags >= MAX_SKB_FRAGS)) { xennet_set_rx_rsp_cons(queue, ++cons + skb_queue_len(list)); kfree_skb(nskb); return -ENOENT; } skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, skb_frag_page(nfrag), rx.offset, rx.status, PAGE_SIZE); skb_shinfo(nskb)->nr_frags = 0; kfree_skb(nskb); } xennet_set_rx_rsp_cons(queue, cons); return 0; } static int checksum_setup(struct net_device *dev, struct sk_buff *skb) { bool recalculate_partial_csum = false; /* * A GSO SKB must be CHECKSUM_PARTIAL. However some buggy * peers can fail to set NETRXF_csum_blank when sending a GSO * frame. In this case force the SKB to CHECKSUM_PARTIAL and * recalculate the partial checksum. 
*/ if (skb->ip_summed != CHECKSUM_PARTIAL && skb_is_gso(skb)) { struct netfront_info *np = netdev_priv(dev); atomic_inc(&np->rx_gso_checksum_fixup); skb->ip_summed = CHECKSUM_PARTIAL; recalculate_partial_csum = true; } /* A non-CHECKSUM_PARTIAL SKB does not require setup. */ if (skb->ip_summed != CHECKSUM_PARTIAL) return 0; return skb_checksum_setup(skb, recalculate_partial_csum); } static int handle_incoming_queue(struct netfront_queue *queue, struct sk_buff_head *rxq) { struct netfront_stats *rx_stats = this_cpu_ptr(queue->info->rx_stats); int packets_dropped = 0; struct sk_buff *skb; while ((skb = __skb_dequeue(rxq)) != NULL) { int pull_to = NETFRONT_SKB_CB(skb)->pull_to; if (pull_to > skb_headlen(skb)) __pskb_pull_tail(skb, pull_to - skb_headlen(skb)); /* Ethernet work: Delayed to here as it peeks the header. */ skb->protocol = eth_type_trans(skb, queue->info->netdev); skb_reset_network_header(skb); if (checksum_setup(queue->info->netdev, skb)) { kfree_skb(skb); packets_dropped++; queue->info->netdev->stats.rx_errors++; continue; } u64_stats_update_begin(&rx_stats->syncp); rx_stats->packets++; rx_stats->bytes += skb->len; u64_stats_update_end(&rx_stats->syncp); /* Pass it up. */ napi_gro_receive(&queue->napi, skb); } return packets_dropped; } static int xennet_poll(struct napi_struct *napi, int budget) { struct netfront_queue *queue = container_of(napi, struct netfront_queue, napi); struct net_device *dev = queue->info->netdev; struct sk_buff *skb; struct netfront_rx_info rinfo; struct xen_netif_rx_response *rx = &rinfo.rx; struct xen_netif_extra_info *extras = rinfo.extras; RING_IDX i, rp; int work_done; struct sk_buff_head rxq; struct sk_buff_head errq; struct sk_buff_head tmpq; int err; bool need_xdp_flush = false; spin_lock(&queue->rx_lock); skb_queue_head_init(&rxq); skb_queue_head_init(&errq); skb_queue_head_init(&tmpq); rp = queue->rx.sring->rsp_prod; if (RING_RESPONSE_PROD_OVERFLOW(&queue->rx, rp)) { dev_alert(&dev->dev, "Illegal number of responses %u\n", rp - queue->rx.rsp_cons); queue->info->broken = true; spin_unlock(&queue->rx_lock); return 0; } rmb(); /* Ensure we see queued responses up to 'rp'. 
*/ i = queue->rx.rsp_cons; work_done = 0; while ((i != rp) && (work_done < budget)) { RING_COPY_RESPONSE(&queue->rx, i, rx); memset(extras, 0, sizeof(rinfo.extras)); err = xennet_get_responses(queue, &rinfo, rp, &tmpq, &need_xdp_flush); if (unlikely(err)) { if (queue->info->broken) { spin_unlock(&queue->rx_lock); return 0; } err: while ((skb = __skb_dequeue(&tmpq))) __skb_queue_tail(&errq, skb); dev->stats.rx_errors++; i = queue->rx.rsp_cons; continue; } skb = __skb_dequeue(&tmpq); if (extras[XEN_NETIF_EXTRA_TYPE_GSO - 1].type) { struct xen_netif_extra_info *gso; gso = &extras[XEN_NETIF_EXTRA_TYPE_GSO - 1]; if (unlikely(xennet_set_skb_gso(skb, gso))) { __skb_queue_head(&tmpq, skb); xennet_set_rx_rsp_cons(queue, queue->rx.rsp_cons + skb_queue_len(&tmpq)); goto err; } } NETFRONT_SKB_CB(skb)->pull_to = rx->status; if (NETFRONT_SKB_CB(skb)->pull_to > RX_COPY_THRESHOLD) NETFRONT_SKB_CB(skb)->pull_to = RX_COPY_THRESHOLD; skb_frag_off_set(&skb_shinfo(skb)->frags[0], rx->offset); skb_frag_size_set(&skb_shinfo(skb)->frags[0], rx->status); skb->data_len = rx->status; skb->len += rx->status; if (unlikely(xennet_fill_frags(queue, skb, &tmpq))) goto err; if (rx->flags & XEN_NETRXF_csum_blank) skb->ip_summed = CHECKSUM_PARTIAL; else if (rx->flags & XEN_NETRXF_data_validated) skb->ip_summed = CHECKSUM_UNNECESSARY; __skb_queue_tail(&rxq, skb); i = queue->rx.rsp_cons + 1; xennet_set_rx_rsp_cons(queue, i); work_done++; } if (need_xdp_flush) xdp_do_flush(); __skb_queue_purge(&errq); work_done -= handle_incoming_queue(queue, &rxq); xennet_alloc_rx_buffers(queue); if (work_done < budget) { int more_to_do = 0; napi_complete_done(napi, work_done); RING_FINAL_CHECK_FOR_RESPONSES(&queue->rx, more_to_do); if (more_to_do) napi_schedule(napi); } spin_unlock(&queue->rx_lock); return work_done; } static int xennet_change_mtu(struct net_device *dev, int mtu) { int max = xennet_can_sg(dev) ? 
XEN_NETIF_MAX_TX_SIZE : ETH_DATA_LEN; if (mtu > max) return -EINVAL; dev->mtu = mtu; return 0; } static void xennet_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *tot) { struct netfront_info *np = netdev_priv(dev); int cpu; for_each_possible_cpu(cpu) { struct netfront_stats *rx_stats = per_cpu_ptr(np->rx_stats, cpu); struct netfront_stats *tx_stats = per_cpu_ptr(np->tx_stats, cpu); u64 rx_packets, rx_bytes, tx_packets, tx_bytes; unsigned int start; do { start = u64_stats_fetch_begin(&tx_stats->syncp); tx_packets = tx_stats->packets; tx_bytes = tx_stats->bytes; } while (u64_stats_fetch_retry(&tx_stats->syncp, start)); do { start = u64_stats_fetch_begin(&rx_stats->syncp); rx_packets = rx_stats->packets; rx_bytes = rx_stats->bytes; } while (u64_stats_fetch_retry(&rx_stats->syncp, start)); tot->rx_packets += rx_packets; tot->tx_packets += tx_packets; tot->rx_bytes += rx_bytes; tot->tx_bytes += tx_bytes; } tot->rx_errors = dev->stats.rx_errors; tot->tx_dropped = dev->stats.tx_dropped; } static void xennet_release_tx_bufs(struct netfront_queue *queue) { struct sk_buff *skb; int i; for (i = 0; i < NET_TX_RING_SIZE; i++) { /* Skip over entries which are actually freelist references */ if (!queue->tx_skbs[i]) continue; skb = queue->tx_skbs[i]; queue->tx_skbs[i] = NULL; get_page(queue->grant_tx_page[i]); gnttab_end_foreign_access(queue->grant_tx_ref[i], queue->grant_tx_page[i]); queue->grant_tx_page[i] = NULL; queue->grant_tx_ref[i] = INVALID_GRANT_REF; add_id_to_list(&queue->tx_skb_freelist, queue->tx_link, i); dev_kfree_skb_irq(skb); } } static void xennet_release_rx_bufs(struct netfront_queue *queue) { int id, ref; spin_lock_bh(&queue->rx_lock); for (id = 0; id < NET_RX_RING_SIZE; id++) { struct sk_buff *skb; struct page *page; skb = queue->rx_skbs[id]; if (!skb) continue; ref = queue->grant_rx_ref[id]; if (ref == INVALID_GRANT_REF) continue; page = skb_frag_page(&skb_shinfo(skb)->frags[0]); /* gnttab_end_foreign_access() needs a page ref until * foreign access is ended (which may be deferred). 
*/ get_page(page); gnttab_end_foreign_access(ref, page); queue->grant_rx_ref[id] = INVALID_GRANT_REF; kfree_skb(skb); } spin_unlock_bh(&queue->rx_lock); } static netdev_features_t xennet_fix_features(struct net_device *dev, netdev_features_t features) { struct netfront_info *np = netdev_priv(dev); if (features & NETIF_F_SG && !xenbus_read_unsigned(np->xbdev->otherend, "feature-sg", 0)) features &= ~NETIF_F_SG; if (features & NETIF_F_IPV6_CSUM && !xenbus_read_unsigned(np->xbdev->otherend, "feature-ipv6-csum-offload", 0)) features &= ~NETIF_F_IPV6_CSUM; if (features & NETIF_F_TSO && !xenbus_read_unsigned(np->xbdev->otherend, "feature-gso-tcpv4", 0)) features &= ~NETIF_F_TSO; if (features & NETIF_F_TSO6 && !xenbus_read_unsigned(np->xbdev->otherend, "feature-gso-tcpv6", 0)) features &= ~NETIF_F_TSO6; return features; } static int xennet_set_features(struct net_device *dev, netdev_features_t features) { if (!(features & NETIF_F_SG) && dev->mtu > ETH_DATA_LEN) { netdev_info(dev, "Reducing MTU because no SG offload"); dev->mtu = ETH_DATA_LEN; } return 0; } static bool xennet_handle_tx(struct netfront_queue *queue, unsigned int *eoi) { unsigned long flags; if (unlikely(queue->info->broken)) return false; spin_lock_irqsave(&queue->tx_lock, flags); if (xennet_tx_buf_gc(queue)) *eoi = 0; spin_unlock_irqrestore(&queue->tx_lock, flags); return true; } static irqreturn_t xennet_tx_interrupt(int irq, void *dev_id) { unsigned int eoiflag = XEN_EOI_FLAG_SPURIOUS; if (likely(xennet_handle_tx(dev_id, &eoiflag))) xen_irq_lateeoi(irq, eoiflag); return IRQ_HANDLED; } static bool xennet_handle_rx(struct netfront_queue *queue, unsigned int *eoi) { unsigned int work_queued; unsigned long flags; if (unlikely(queue->info->broken)) return false; spin_lock_irqsave(&queue->rx_cons_lock, flags); work_queued = XEN_RING_NR_UNCONSUMED_RESPONSES(&queue->rx); if (work_queued > queue->rx_rsp_unconsumed) { queue->rx_rsp_unconsumed = work_queued; *eoi = 0; } else if (unlikely(work_queued < queue->rx_rsp_unconsumed)) { const struct device *dev = &queue->info->netdev->dev; spin_unlock_irqrestore(&queue->rx_cons_lock, flags); dev_alert(dev, "RX producer index going backwards\n"); dev_alert(dev, "Disabled for further use\n"); queue->info->broken = true; return false; } spin_unlock_irqrestore(&queue->rx_cons_lock, flags); if (likely(netif_carrier_ok(queue->info->netdev) && work_queued)) napi_schedule(&queue->napi); return true; } static irqreturn_t xennet_rx_interrupt(int irq, void *dev_id) { unsigned int eoiflag = XEN_EOI_FLAG_SPURIOUS; if (likely(xennet_handle_rx(dev_id, &eoiflag))) xen_irq_lateeoi(irq, eoiflag); return IRQ_HANDLED; } static irqreturn_t xennet_interrupt(int irq, void *dev_id) { unsigned int eoiflag = XEN_EOI_FLAG_SPURIOUS; if (xennet_handle_tx(dev_id, &eoiflag) && xennet_handle_rx(dev_id, &eoiflag)) xen_irq_lateeoi(irq, eoiflag); return IRQ_HANDLED; } #ifdef CONFIG_NET_POLL_CONTROLLER static void xennet_poll_controller(struct net_device *dev) { /* Poll each queue */ struct netfront_info *info = netdev_priv(dev); unsigned int num_queues = dev->real_num_tx_queues; unsigned int i; if (info->broken) return; for (i = 0; i < num_queues; ++i) xennet_interrupt(0, &info->queues[i]); } #endif #define NETBACK_XDP_HEADROOM_DISABLE 0 #define NETBACK_XDP_HEADROOM_ENABLE 1 static int talk_to_netback_xdp(struct netfront_info *np, int xdp) { int err; unsigned short headroom; headroom = xdp ? 
XDP_PACKET_HEADROOM : 0; err = xenbus_printf(XBT_NIL, np->xbdev->nodename, "xdp-headroom", "%hu", headroom); if (err) pr_warn("Error writing xdp-headroom\n"); return err; } static int xennet_xdp_set(struct net_device *dev, struct bpf_prog *prog, struct netlink_ext_ack *extack) { unsigned long max_mtu = XEN_PAGE_SIZE - XDP_PACKET_HEADROOM; struct netfront_info *np = netdev_priv(dev); struct bpf_prog *old_prog; unsigned int i, err; if (dev->mtu > max_mtu) { netdev_warn(dev, "XDP requires MTU less than %lu\n", max_mtu); return -EINVAL; } if (!np->netback_has_xdp_headroom) return 0; xenbus_switch_state(np->xbdev, XenbusStateReconfiguring); err = talk_to_netback_xdp(np, prog ? NETBACK_XDP_HEADROOM_ENABLE : NETBACK_XDP_HEADROOM_DISABLE); if (err) return err; /* avoid the race with XDP headroom adjustment */ wait_event(module_wq, xenbus_read_driver_state(np->xbdev->otherend) == XenbusStateReconfigured); np->netfront_xdp_enabled = true; old_prog = rtnl_dereference(np->queues[0].xdp_prog); if (prog) bpf_prog_add(prog, dev->real_num_tx_queues); for (i = 0; i < dev->real_num_tx_queues; ++i) rcu_assign_pointer(np->queues[i].xdp_prog, prog); if (old_prog) for (i = 0; i < dev->real_num_tx_queues; ++i) bpf_prog_put(old_prog); xenbus_switch_state(np->xbdev, XenbusStateConnected); return 0; } static int xennet_xdp(struct net_device *dev, struct netdev_bpf *xdp) { struct netfront_info *np = netdev_priv(dev); if (np->broken) return -ENODEV; switch (xdp->command) { case XDP_SETUP_PROG: return xennet_xdp_set(dev, xdp->prog, xdp->extack); default: return -EINVAL; } } static const struct net_device_ops xennet_netdev_ops = { .ndo_uninit = xennet_uninit, .ndo_open = xennet_open, .ndo_stop = xennet_close, .ndo_start_xmit = xennet_start_xmit, .ndo_change_mtu = xennet_change_mtu, .ndo_get_stats64 = xennet_get_stats64, .ndo_set_mac_address = eth_mac_addr, .ndo_validate_addr = eth_validate_addr, .ndo_fix_features = xennet_fix_features, .ndo_set_features = xennet_set_features, .ndo_select_queue = xennet_select_queue, .ndo_bpf = xennet_xdp, .ndo_xdp_xmit = xennet_xdp_xmit, #ifdef CONFIG_NET_POLL_CONTROLLER .ndo_poll_controller = xennet_poll_controller, #endif }; static void xennet_free_netdev(struct net_device *netdev) { struct netfront_info *np = netdev_priv(netdev); free_percpu(np->rx_stats); free_percpu(np->tx_stats); free_netdev(netdev); } static struct net_device *xennet_create_dev(struct xenbus_device *dev) { int err; struct net_device *netdev; struct netfront_info *np; netdev = alloc_etherdev_mq(sizeof(struct netfront_info), xennet_max_queues); if (!netdev) return ERR_PTR(-ENOMEM); np = netdev_priv(netdev); np->xbdev = dev; np->queues = NULL; err = -ENOMEM; np->rx_stats = netdev_alloc_pcpu_stats(struct netfront_stats); if (np->rx_stats == NULL) goto exit; np->tx_stats = netdev_alloc_pcpu_stats(struct netfront_stats); if (np->tx_stats == NULL) goto exit; netdev->netdev_ops = &xennet_netdev_ops; netdev->features = NETIF_F_IP_CSUM | NETIF_F_RXCSUM | NETIF_F_GSO_ROBUST; netdev->hw_features = NETIF_F_SG | NETIF_F_IPV6_CSUM | NETIF_F_TSO | NETIF_F_TSO6; /* * Assume that all hw features are available for now. This set * will be adjusted by the call to netdev_update_features() in * xennet_connect() which is the earliest point where we can * negotiate with the backend regarding supported features. 
*/ netdev->features |= netdev->hw_features; netdev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT | NETDEV_XDP_ACT_NDO_XMIT; netdev->ethtool_ops = &xennet_ethtool_ops; netdev->min_mtu = ETH_MIN_MTU; netdev->max_mtu = XEN_NETIF_MAX_TX_SIZE; SET_NETDEV_DEV(netdev, &dev->dev); np->netdev = netdev; np->netfront_xdp_enabled = false; netif_carrier_off(netdev); do { xenbus_switch_state(dev, XenbusStateInitialising); err = wait_event_timeout(module_wq, xenbus_read_driver_state(dev->otherend) != XenbusStateClosed && xenbus_read_driver_state(dev->otherend) != XenbusStateUnknown, XENNET_TIMEOUT); } while (!err); return netdev; exit: xennet_free_netdev(netdev); return ERR_PTR(err); } /* * Entry point to this code when a new device is created. Allocate the basic * structures and the ring buffers for communication with the backend, and * inform the backend of the appropriate details for those. */ static int netfront_probe(struct xenbus_device *dev, const struct xenbus_device_id *id) { int err; struct net_device *netdev; struct netfront_info *info; netdev = xennet_create_dev(dev); if (IS_ERR(netdev)) { err = PTR_ERR(netdev); xenbus_dev_fatal(dev, err, "creating netdev"); return err; } info = netdev_priv(netdev); dev_set_drvdata(&dev->dev, info); #ifdef CONFIG_SYSFS info->netdev->sysfs_groups[0] = &xennet_dev_group; #endif return 0; } static void xennet_end_access(int ref, void *page) { /* This frees the page as a side-effect */ if (ref != INVALID_GRANT_REF) gnttab_end_foreign_access(ref, virt_to_page(page)); } static void xennet_disconnect_backend(struct netfront_info *info) { unsigned int i = 0; unsigned int num_queues = info->netdev->real_num_tx_queues; netif_carrier_off(info->netdev); for (i = 0; i < num_queues && info->queues; ++i) { struct netfront_queue *queue = &info->queues[i]; del_timer_sync(&queue->rx_refill_timer); if (queue->tx_irq && (queue->tx_irq == queue->rx_irq)) unbind_from_irqhandler(queue->tx_irq, queue); if (queue->tx_irq && (queue->tx_irq != queue->rx_irq)) { unbind_from_irqhandler(queue->tx_irq, queue); unbind_from_irqhandler(queue->rx_irq, queue); } queue->tx_evtchn = queue->rx_evtchn = 0; queue->tx_irq = queue->rx_irq = 0; if (netif_running(info->netdev)) napi_synchronize(&queue->napi); xennet_release_tx_bufs(queue); xennet_release_rx_bufs(queue); gnttab_free_grant_references(queue->gref_tx_head); gnttab_free_grant_references(queue->gref_rx_head); /* End access and free the pages */ xennet_end_access(queue->tx_ring_ref, queue->tx.sring); xennet_end_access(queue->rx_ring_ref, queue->rx.sring); queue->tx_ring_ref = INVALID_GRANT_REF; queue->rx_ring_ref = INVALID_GRANT_REF; queue->tx.sring = NULL; queue->rx.sring = NULL; page_pool_destroy(queue->page_pool); } } /* * We are reconnecting to the backend, due to a suspend/resume, or a backend * driver restart. We tear down our netif structure and recreate it, but * leave the device-layer structures intact so that this is transparent to the * rest of the kernel. 
*/ static int netfront_resume(struct xenbus_device *dev) { struct netfront_info *info = dev_get_drvdata(&dev->dev); dev_dbg(&dev->dev, "%s\n", dev->nodename); netif_tx_lock_bh(info->netdev); netif_device_detach(info->netdev); netif_tx_unlock_bh(info->netdev); xennet_disconnect_backend(info); rtnl_lock(); if (info->queues) xennet_destroy_queues(info); rtnl_unlock(); return 0; } static int xen_net_read_mac(struct xenbus_device *dev, u8 mac[]) { char *s, *e, *macstr; int i; macstr = s = xenbus_read(XBT_NIL, dev->nodename, "mac", NULL); if (IS_ERR(macstr)) return PTR_ERR(macstr); for (i = 0; i < ETH_ALEN; i++) { mac[i] = simple_strtoul(s, &e, 16); if ((s == e) || (*e != ((i == ETH_ALEN-1) ? '\0' : ':'))) { kfree(macstr); return -ENOENT; } s = e+1; } kfree(macstr); return 0; } static int setup_netfront_single(struct netfront_queue *queue) { int err; err = xenbus_alloc_evtchn(queue->info->xbdev, &queue->tx_evtchn); if (err < 0) goto fail; err = bind_evtchn_to_irqhandler_lateeoi(queue->tx_evtchn, xennet_interrupt, 0, queue->info->netdev->name, queue); if (err < 0) goto bind_fail; queue->rx_evtchn = queue->tx_evtchn; queue->rx_irq = queue->tx_irq = err; return 0; bind_fail: xenbus_free_evtchn(queue->info->xbdev, queue->tx_evtchn); queue->tx_evtchn = 0; fail: return err; } static int setup_netfront_split(struct netfront_queue *queue) { int err; err = xenbus_alloc_evtchn(queue->info->xbdev, &queue->tx_evtchn); if (err < 0) goto fail; err = xenbus_alloc_evtchn(queue->info->xbdev, &queue->rx_evtchn); if (err < 0) goto alloc_rx_evtchn_fail; snprintf(queue->tx_irq_name, sizeof(queue->tx_irq_name), "%s-tx", queue->name); err = bind_evtchn_to_irqhandler_lateeoi(queue->tx_evtchn, xennet_tx_interrupt, 0, queue->tx_irq_name, queue); if (err < 0) goto bind_tx_fail; queue->tx_irq = err; snprintf(queue->rx_irq_name, sizeof(queue->rx_irq_name), "%s-rx", queue->name); err = bind_evtchn_to_irqhandler_lateeoi(queue->rx_evtchn, xennet_rx_interrupt, 0, queue->rx_irq_name, queue); if (err < 0) goto bind_rx_fail; queue->rx_irq = err; return 0; bind_rx_fail: unbind_from_irqhandler(queue->tx_irq, queue); queue->tx_irq = 0; bind_tx_fail: xenbus_free_evtchn(queue->info->xbdev, queue->rx_evtchn); queue->rx_evtchn = 0; alloc_rx_evtchn_fail: xenbus_free_evtchn(queue->info->xbdev, queue->tx_evtchn); queue->tx_evtchn = 0; fail: return err; } static int setup_netfront(struct xenbus_device *dev, struct netfront_queue *queue, unsigned int feature_split_evtchn) { struct xen_netif_tx_sring *txs; struct xen_netif_rx_sring *rxs; int err; queue->tx_ring_ref = INVALID_GRANT_REF; queue->rx_ring_ref = INVALID_GRANT_REF; queue->rx.sring = NULL; queue->tx.sring = NULL; err = xenbus_setup_ring(dev, GFP_NOIO | __GFP_HIGH, (void **)&txs, 1, &queue->tx_ring_ref); if (err) goto fail; XEN_FRONT_RING_INIT(&queue->tx, txs, XEN_PAGE_SIZE); err = xenbus_setup_ring(dev, GFP_NOIO | __GFP_HIGH, (void **)&rxs, 1, &queue->rx_ring_ref); if (err) goto fail; XEN_FRONT_RING_INIT(&queue->rx, rxs, XEN_PAGE_SIZE); if (feature_split_evtchn) err = setup_netfront_split(queue); /* setup single event channel if * a) feature-split-event-channels == 0 * b) feature-split-event-channels == 1 but failed to setup */ if (!feature_split_evtchn || err) err = setup_netfront_single(queue); if (err) goto fail; return 0; fail: xenbus_teardown_ring((void **)&queue->rx.sring, 1, &queue->rx_ring_ref); xenbus_teardown_ring((void **)&queue->tx.sring, 1, &queue->tx_ring_ref); return err; } /* Queue-specific initialisation * This used to be done in xennet_create_dev() but must now * be 
run per-queue. */ static int xennet_init_queue(struct netfront_queue *queue) { unsigned short i; int err = 0; char *devid; spin_lock_init(&queue->tx_lock); spin_lock_init(&queue->rx_lock); spin_lock_init(&queue->rx_cons_lock); timer_setup(&queue->rx_refill_timer, rx_refill_timeout, 0); devid = strrchr(queue->info->xbdev->nodename, '/') + 1; snprintf(queue->name, sizeof(queue->name), "vif%s-q%u", devid, queue->id); /* Initialise tx_skb_freelist as a free chain containing every entry. */ queue->tx_skb_freelist = 0; queue->tx_pend_queue = TX_LINK_NONE; for (i = 0; i < NET_TX_RING_SIZE; i++) { queue->tx_link[i] = i + 1; queue->grant_tx_ref[i] = INVALID_GRANT_REF; queue->grant_tx_page[i] = NULL; } queue->tx_link[NET_TX_RING_SIZE - 1] = TX_LINK_NONE; /* Clear out rx_skbs */ for (i = 0; i < NET_RX_RING_SIZE; i++) { queue->rx_skbs[i] = NULL; queue->grant_rx_ref[i] = INVALID_GRANT_REF; } /* A grant for every tx ring slot */ if (gnttab_alloc_grant_references(NET_TX_RING_SIZE, &queue->gref_tx_head) < 0) { pr_alert("can't alloc tx grant refs\n"); err = -ENOMEM; goto exit; } /* A grant for every rx ring slot */ if (gnttab_alloc_grant_references(NET_RX_RING_SIZE, &queue->gref_rx_head) < 0) { pr_alert("can't alloc rx grant refs\n"); err = -ENOMEM; goto exit_free_tx; } return 0; exit_free_tx: gnttab_free_grant_references(queue->gref_tx_head); exit: return err; } static int write_queue_xenstore_keys(struct netfront_queue *queue, struct xenbus_transaction *xbt, int write_hierarchical) { /* Write the queue-specific keys into XenStore in the traditional * way for a single queue, or in a queue subkeys for multiple * queues. */ struct xenbus_device *dev = queue->info->xbdev; int err; const char *message; char *path; size_t pathsize; /* Choose the correct place to write the keys */ if (write_hierarchical) { pathsize = strlen(dev->nodename) + 10; path = kzalloc(pathsize, GFP_KERNEL); if (!path) { err = -ENOMEM; message = "out of memory while writing ring references"; goto error; } snprintf(path, pathsize, "%s/queue-%u", dev->nodename, queue->id); } else { path = (char *)dev->nodename; } /* Write ring references */ err = xenbus_printf(*xbt, path, "tx-ring-ref", "%u", queue->tx_ring_ref); if (err) { message = "writing tx-ring-ref"; goto error; } err = xenbus_printf(*xbt, path, "rx-ring-ref", "%u", queue->rx_ring_ref); if (err) { message = "writing rx-ring-ref"; goto error; } /* Write event channels; taking into account both shared * and split event channel scenarios. 
*/ if (queue->tx_evtchn == queue->rx_evtchn) { /* Shared event channel */ err = xenbus_printf(*xbt, path, "event-channel", "%u", queue->tx_evtchn); if (err) { message = "writing event-channel"; goto error; } } else { /* Split event channels */ err = xenbus_printf(*xbt, path, "event-channel-tx", "%u", queue->tx_evtchn); if (err) { message = "writing event-channel-tx"; goto error; } err = xenbus_printf(*xbt, path, "event-channel-rx", "%u", queue->rx_evtchn); if (err) { message = "writing event-channel-rx"; goto error; } } if (write_hierarchical) kfree(path); return 0; error: if (write_hierarchical) kfree(path); xenbus_dev_fatal(dev, err, "%s", message); return err; } static int xennet_create_page_pool(struct netfront_queue *queue) { int err; struct page_pool_params pp_params = { .order = 0, .flags = 0, .pool_size = NET_RX_RING_SIZE, .nid = NUMA_NO_NODE, .dev = &queue->info->netdev->dev, .offset = XDP_PACKET_HEADROOM, .max_len = XEN_PAGE_SIZE - XDP_PACKET_HEADROOM, }; queue->page_pool = page_pool_create(&pp_params); if (IS_ERR(queue->page_pool)) { err = PTR_ERR(queue->page_pool); queue->page_pool = NULL; return err; } err = xdp_rxq_info_reg(&queue->xdp_rxq, queue->info->netdev, queue->id, 0); if (err) { netdev_err(queue->info->netdev, "xdp_rxq_info_reg failed\n"); goto err_free_pp; } err = xdp_rxq_info_reg_mem_model(&queue->xdp_rxq, MEM_TYPE_PAGE_POOL, queue->page_pool); if (err) { netdev_err(queue->info->netdev, "xdp_rxq_info_reg_mem_model failed\n"); goto err_unregister_rxq; } return 0; err_unregister_rxq: xdp_rxq_info_unreg(&queue->xdp_rxq); err_free_pp: page_pool_destroy(queue->page_pool); queue->page_pool = NULL; return err; } static int xennet_create_queues(struct netfront_info *info, unsigned int *num_queues) { unsigned int i; int ret; info->queues = kcalloc(*num_queues, sizeof(struct netfront_queue), GFP_KERNEL); if (!info->queues) return -ENOMEM; for (i = 0; i < *num_queues; i++) { struct netfront_queue *queue = &info->queues[i]; queue->id = i; queue->info = info; ret = xennet_init_queue(queue); if (ret < 0) { dev_warn(&info->xbdev->dev, "only created %d queues\n", i); *num_queues = i; break; } /* use page pool recycling instead of buddy allocator */ ret = xennet_create_page_pool(queue); if (ret < 0) { dev_err(&info->xbdev->dev, "can't allocate page pool\n"); *num_queues = i; return ret; } netif_napi_add(queue->info->netdev, &queue->napi, xennet_poll); if (netif_running(info->netdev)) napi_enable(&queue->napi); } netif_set_real_num_tx_queues(info->netdev, *num_queues); if (*num_queues == 0) { dev_err(&info->xbdev->dev, "no queues\n"); return -EINVAL; } return 0; } /* Common code used when first setting up, and when resuming. */ static int talk_to_netback(struct xenbus_device *dev, struct netfront_info *info) { const char *message; struct xenbus_transaction xbt; int err; unsigned int feature_split_evtchn; unsigned int i = 0; unsigned int max_queues = 0; struct netfront_queue *queue = NULL; unsigned int num_queues = 1; u8 addr[ETH_ALEN]; info->netdev->irq = 0; /* Check if backend is trusted. */ info->bounce = !xennet_trusted || !xenbus_read_unsigned(dev->nodename, "trusted", 1); /* Check if backend supports multiple queues */ max_queues = xenbus_read_unsigned(info->xbdev->otherend, "multi-queue-max-queues", 1); num_queues = min(max_queues, xennet_max_queues); /* Check feature-split-event-channels */ feature_split_evtchn = xenbus_read_unsigned(info->xbdev->otherend, "feature-split-event-channels", 0); /* Read mac addr. 
*/ err = xen_net_read_mac(dev, addr); if (err) { xenbus_dev_fatal(dev, err, "parsing %s/mac", dev->nodename); goto out_unlocked; } eth_hw_addr_set(info->netdev, addr); info->netback_has_xdp_headroom = xenbus_read_unsigned(info->xbdev->otherend, "feature-xdp-headroom", 0); if (info->netback_has_xdp_headroom) { /* set the current xen-netfront xdp state */ err = talk_to_netback_xdp(info, info->netfront_xdp_enabled ? NETBACK_XDP_HEADROOM_ENABLE : NETBACK_XDP_HEADROOM_DISABLE); if (err) goto out_unlocked; } rtnl_lock(); if (info->queues) xennet_destroy_queues(info); /* For the case of a reconnect reset the "broken" indicator. */ info->broken = false; err = xennet_create_queues(info, &num_queues); if (err < 0) { xenbus_dev_fatal(dev, err, "creating queues"); kfree(info->queues); info->queues = NULL; goto out; } rtnl_unlock(); /* Create shared ring, alloc event channel -- for each queue */ for (i = 0; i < num_queues; ++i) { queue = &info->queues[i]; err = setup_netfront(dev, queue, feature_split_evtchn); if (err) goto destroy_ring; } again: err = xenbus_transaction_start(&xbt); if (err) { xenbus_dev_fatal(dev, err, "starting transaction"); goto destroy_ring; } if (xenbus_exists(XBT_NIL, info->xbdev->otherend, "multi-queue-max-queues")) { /* Write the number of queues */ err = xenbus_printf(xbt, dev->nodename, "multi-queue-num-queues", "%u", num_queues); if (err) { message = "writing multi-queue-num-queues"; goto abort_transaction_no_dev_fatal; } } if (num_queues == 1) { err = write_queue_xenstore_keys(&info->queues[0], &xbt, 0); /* flat */ if (err) goto abort_transaction_no_dev_fatal; } else { /* Write the keys for each queue */ for (i = 0; i < num_queues; ++i) { queue = &info->queues[i]; err = write_queue_xenstore_keys(queue, &xbt, 1); /* hierarchical */ if (err) goto abort_transaction_no_dev_fatal; } } /* The remaining keys are not queue-specific */ err = xenbus_printf(xbt, dev->nodename, "request-rx-copy", "%u", 1); if (err) { message = "writing request-rx-copy"; goto abort_transaction; } err = xenbus_printf(xbt, dev->nodename, "feature-rx-notify", "%d", 1); if (err) { message = "writing feature-rx-notify"; goto abort_transaction; } err = xenbus_printf(xbt, dev->nodename, "feature-sg", "%d", 1); if (err) { message = "writing feature-sg"; goto abort_transaction; } err = xenbus_printf(xbt, dev->nodename, "feature-gso-tcpv4", "%d", 1); if (err) { message = "writing feature-gso-tcpv4"; goto abort_transaction; } err = xenbus_write(xbt, dev->nodename, "feature-gso-tcpv6", "1"); if (err) { message = "writing feature-gso-tcpv6"; goto abort_transaction; } err = xenbus_write(xbt, dev->nodename, "feature-ipv6-csum-offload", "1"); if (err) { message = "writing feature-ipv6-csum-offload"; goto abort_transaction; } err = xenbus_transaction_end(xbt, 0); if (err) { if (err == -EAGAIN) goto again; xenbus_dev_fatal(dev, err, "completing transaction"); goto destroy_ring; } return 0; abort_transaction: xenbus_dev_fatal(dev, err, "%s", message); abort_transaction_no_dev_fatal: xenbus_transaction_end(xbt, 1); destroy_ring: xennet_disconnect_backend(info); rtnl_lock(); xennet_destroy_queues(info); out: rtnl_unlock(); out_unlocked: device_unregister(&dev->dev); return err; } static int xennet_connect(struct net_device *dev) { struct netfront_info *np = netdev_priv(dev); unsigned int num_queues = 0; int err; unsigned int j = 0; struct netfront_queue *queue = NULL; if (!xenbus_read_unsigned(np->xbdev->otherend, "feature-rx-copy", 0)) { dev_info(&dev->dev, "backend does not support copying receive path\n"); return 
-ENODEV; } err = talk_to_netback(np->xbdev, np); if (err) return err; if (np->netback_has_xdp_headroom) pr_info("backend supports XDP headroom\n"); if (np->bounce) dev_info(&np->xbdev->dev, "bouncing transmitted data to zeroed pages\n"); /* talk_to_netback() sets the correct number of queues */ num_queues = dev->real_num_tx_queues; if (dev->reg_state == NETREG_UNINITIALIZED) { err = register_netdev(dev); if (err) { pr_warn("%s: register_netdev err=%d\n", __func__, err); device_unregister(&np->xbdev->dev); return err; } } rtnl_lock(); netdev_update_features(dev); rtnl_unlock(); /* * All public and private state should now be sane. Get * ready to start sending and receiving packets and give the driver * domain a kick because we've probably just requeued some * packets. */ netif_tx_lock_bh(np->netdev); netif_device_attach(np->netdev); netif_tx_unlock_bh(np->netdev); netif_carrier_on(np->netdev); for (j = 0; j < num_queues; ++j) { queue = &np->queues[j]; notify_remote_via_irq(queue->tx_irq); if (queue->tx_irq != queue->rx_irq) notify_remote_via_irq(queue->rx_irq); spin_lock_bh(&queue->rx_lock); xennet_alloc_rx_buffers(queue); spin_unlock_bh(&queue->rx_lock); } return 0; } /* * Callback received when the backend's state changes. */ static void netback_changed(struct xenbus_device *dev, enum xenbus_state backend_state) { struct netfront_info *np = dev_get_drvdata(&dev->dev); struct net_device *netdev = np->netdev; dev_dbg(&dev->dev, "%s\n", xenbus_strstate(backend_state)); wake_up_all(&module_wq); switch (backend_state) { case XenbusStateInitialising: case XenbusStateInitialised: case XenbusStateReconfiguring: case XenbusStateReconfigured: case XenbusStateUnknown: break; case XenbusStateInitWait: if (dev->state != XenbusStateInitialising) break; if (xennet_connect(netdev) != 0) break; xenbus_switch_state(dev, XenbusStateConnected); break; case XenbusStateConnected: netdev_notify_peers(netdev); break; case XenbusStateClosed: if (dev->state == XenbusStateClosed) break; fallthrough; /* Missed the backend's CLOSING state */ case XenbusStateClosing: xenbus_frontend_closed(dev); break; } } static const struct xennet_stat { char name[ETH_GSTRING_LEN]; u16 offset; } xennet_stats[] = { { "rx_gso_checksum_fixup", offsetof(struct netfront_info, rx_gso_checksum_fixup) }, }; static int xennet_get_sset_count(struct net_device *dev, int string_set) { switch (string_set) { case ETH_SS_STATS: return ARRAY_SIZE(xennet_stats); default: return -EINVAL; } } static void xennet_get_ethtool_stats(struct net_device *dev, struct ethtool_stats *stats, u64 * data) { void *np = netdev_priv(dev); int i; for (i = 0; i < ARRAY_SIZE(xennet_stats); i++) data[i] = atomic_read((atomic_t *)(np + xennet_stats[i].offset)); } static void xennet_get_strings(struct net_device *dev, u32 stringset, u8 * data) { int i; switch (stringset) { case ETH_SS_STATS: for (i = 0; i < ARRAY_SIZE(xennet_stats); i++) memcpy(data + i * ETH_GSTRING_LEN, xennet_stats[i].name, ETH_GSTRING_LEN); break; } } static const struct ethtool_ops xennet_ethtool_ops = { .get_link = ethtool_op_get_link, .get_sset_count = xennet_get_sset_count, .get_ethtool_stats = xennet_get_ethtool_stats, .get_strings = xennet_get_strings, .get_ts_info = ethtool_op_get_ts_info, }; #ifdef CONFIG_SYSFS static ssize_t show_rxbuf(struct device *dev, struct device_attribute *attr, char *buf) { return sprintf(buf, "%lu\n", NET_RX_RING_SIZE); } static ssize_t store_rxbuf(struct device *dev, struct device_attribute *attr, const char *buf, size_t len) { char *endp; if 
(!capable(CAP_NET_ADMIN)) return -EPERM; simple_strtoul(buf, &endp, 0); if (endp == buf) return -EBADMSG; /* rxbuf_min and rxbuf_max are no longer configurable. */ return len; } static DEVICE_ATTR(rxbuf_min, 0644, show_rxbuf, store_rxbuf); static DEVICE_ATTR(rxbuf_max, 0644, show_rxbuf, store_rxbuf); static DEVICE_ATTR(rxbuf_cur, 0444, show_rxbuf, NULL); static struct attribute *xennet_dev_attrs[] = { &dev_attr_rxbuf_min.attr, &dev_attr_rxbuf_max.attr, &dev_attr_rxbuf_cur.attr, NULL }; static const struct attribute_group xennet_dev_group = { .attrs = xennet_dev_attrs }; #endif /* CONFIG_SYSFS */ static void xennet_bus_close(struct xenbus_device *dev) { int ret; if (xenbus_read_driver_state(dev->otherend) == XenbusStateClosed) return; do { xenbus_switch_state(dev, XenbusStateClosing); ret = wait_event_timeout(module_wq, xenbus_read_driver_state(dev->otherend) == XenbusStateClosing || xenbus_read_driver_state(dev->otherend) == XenbusStateClosed || xenbus_read_driver_state(dev->otherend) == XenbusStateUnknown, XENNET_TIMEOUT); } while (!ret); if (xenbus_read_driver_state(dev->otherend) == XenbusStateClosed) return; do { xenbus_switch_state(dev, XenbusStateClosed); ret = wait_event_timeout(module_wq, xenbus_read_driver_state(dev->otherend) == XenbusStateClosed || xenbus_read_driver_state(dev->otherend) == XenbusStateUnknown, XENNET_TIMEOUT); } while (!ret); } static void xennet_remove(struct xenbus_device *dev) { struct netfront_info *info = dev_get_drvdata(&dev->dev); xennet_bus_close(dev); xennet_disconnect_backend(info); if (info->netdev->reg_state == NETREG_REGISTERED) unregister_netdev(info->netdev); if (info->queues) { rtnl_lock(); xennet_destroy_queues(info); rtnl_unlock(); } xennet_free_netdev(info->netdev); } static const struct xenbus_device_id netfront_ids[] = { { "vif" }, { "" } }; static struct xenbus_driver netfront_driver = { .ids = netfront_ids, .probe = netfront_probe, .remove = xennet_remove, .resume = netfront_resume, .otherend_changed = netback_changed, }; static int __init netif_init(void) { if (!xen_domain()) return -ENODEV; if (!xen_has_pv_nic_devices()) return -ENODEV; pr_info("Initialising Xen virtual ethernet driver\n"); /* Allow as many queues as there are CPUs but max. 8 if user has not * specified a value. */ if (xennet_max_queues == 0) xennet_max_queues = min_t(unsigned int, MAX_QUEUES_DEFAULT, num_online_cpus()); return xenbus_register_frontend(&netfront_driver); } module_init(netif_init); static void __exit netif_exit(void) { xenbus_unregister_driver(&netfront_driver); } module_exit(netif_exit); MODULE_DESCRIPTION("Xen virtual network device frontend"); MODULE_LICENSE("GPL"); MODULE_ALIAS("xen:vif"); MODULE_ALIAS("xennet");
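/*
 * Editorial note (illustrative only, not part of the upstream file): with the
 * default in netif_init() above, the driver uses min(num_online_cpus(),
 * MAX_QUEUES_DEFAULT) queue pairs, i.e. a guest with 16 online vCPUs still
 * gets at most 8 queues unless the limit is raised explicitly. Assuming the
 * max_queues module parameter declared earlier in this file, that would look
 * like:
 *
 *	modprobe xen-netfront max_queues=16
 */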
linux-master
drivers/net/xen-netfront.c
// SPDX-License-Identifier: GPL-2.0-or-later /* Copyright (c) 2021 Taehee Yoo <[email protected]> */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/module.h> #include <linux/skbuff.h> #include <linux/udp.h> #include <linux/jhash.h> #include <linux/if_tunnel.h> #include <linux/net.h> #include <linux/igmp.h> #include <linux/workqueue.h> #include <net/sch_generic.h> #include <net/net_namespace.h> #include <net/ip.h> #include <net/udp.h> #include <net/udp_tunnel.h> #include <net/icmp.h> #include <net/mld.h> #include <net/amt.h> #include <uapi/linux/amt.h> #include <linux/security.h> #include <net/gro_cells.h> #include <net/ipv6.h> #include <net/if_inet6.h> #include <net/ndisc.h> #include <net/addrconf.h> #include <net/ip6_route.h> #include <net/inet_common.h> #include <net/ip6_checksum.h> static struct workqueue_struct *amt_wq; static HLIST_HEAD(source_gc_list); /* Lock for source_gc_list */ static spinlock_t source_gc_lock; static struct delayed_work source_gc_wq; static char *status_str[] = { "AMT_STATUS_INIT", "AMT_STATUS_SENT_DISCOVERY", "AMT_STATUS_RECEIVED_DISCOVERY", "AMT_STATUS_SENT_ADVERTISEMENT", "AMT_STATUS_RECEIVED_ADVERTISEMENT", "AMT_STATUS_SENT_REQUEST", "AMT_STATUS_RECEIVED_REQUEST", "AMT_STATUS_SENT_QUERY", "AMT_STATUS_RECEIVED_QUERY", "AMT_STATUS_SENT_UPDATE", "AMT_STATUS_RECEIVED_UPDATE", }; static char *type_str[] = { "", /* Type 0 is not defined */ "AMT_MSG_DISCOVERY", "AMT_MSG_ADVERTISEMENT", "AMT_MSG_REQUEST", "AMT_MSG_MEMBERSHIP_QUERY", "AMT_MSG_MEMBERSHIP_UPDATE", "AMT_MSG_MULTICAST_DATA", "AMT_MSG_TEARDOWN", }; static char *action_str[] = { "AMT_ACT_GMI", "AMT_ACT_GMI_ZERO", "AMT_ACT_GT", "AMT_ACT_STATUS_FWD_NEW", "AMT_ACT_STATUS_D_FWD_NEW", "AMT_ACT_STATUS_NONE_NEW", }; static struct igmpv3_grec igmpv3_zero_grec; #if IS_ENABLED(CONFIG_IPV6) #define MLD2_ALL_NODE_INIT { { { 0xff, 0x02, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x01 } } } static struct in6_addr mld2_all_node = MLD2_ALL_NODE_INIT; static struct mld2_grec mldv2_zero_grec; #endif static struct amt_skb_cb *amt_skb_cb(struct sk_buff *skb) { BUILD_BUG_ON(sizeof(struct amt_skb_cb) + sizeof(struct qdisc_skb_cb) > sizeof_field(struct sk_buff, cb)); return (struct amt_skb_cb *)((void *)skb->cb + sizeof(struct qdisc_skb_cb)); } static void __amt_source_gc_work(void) { struct amt_source_node *snode; struct hlist_head gc_list; struct hlist_node *t; spin_lock_bh(&source_gc_lock); hlist_move_list(&source_gc_list, &gc_list); spin_unlock_bh(&source_gc_lock); hlist_for_each_entry_safe(snode, t, &gc_list, node) { hlist_del_rcu(&snode->node); kfree_rcu(snode, rcu); } } static void amt_source_gc_work(struct work_struct *work) { __amt_source_gc_work(); spin_lock_bh(&source_gc_lock); mod_delayed_work(amt_wq, &source_gc_wq, msecs_to_jiffies(AMT_GC_INTERVAL)); spin_unlock_bh(&source_gc_lock); } static bool amt_addr_equal(union amt_addr *a, union amt_addr *b) { return !memcmp(a, b, sizeof(union amt_addr)); } static u32 amt_source_hash(struct amt_tunnel_list *tunnel, union amt_addr *src) { u32 hash = jhash(src, sizeof(*src), tunnel->amt->hash_seed); return reciprocal_scale(hash, tunnel->amt->hash_buckets); } static bool amt_status_filter(struct amt_source_node *snode, enum amt_filter filter) { bool rc = false; switch (filter) { case AMT_FILTER_FWD: if (snode->status == AMT_SOURCE_STATUS_FWD && snode->flags == AMT_SOURCE_OLD) rc = true; break; case AMT_FILTER_D_FWD: if (snode->status == AMT_SOURCE_STATUS_D_FWD && snode->flags == AMT_SOURCE_OLD) rc = true; break; case AMT_FILTER_FWD_NEW: if (snode->status == 
AMT_SOURCE_STATUS_FWD && snode->flags == AMT_SOURCE_NEW) rc = true; break; case AMT_FILTER_D_FWD_NEW: if (snode->status == AMT_SOURCE_STATUS_D_FWD && snode->flags == AMT_SOURCE_NEW) rc = true; break; case AMT_FILTER_ALL: rc = true; break; case AMT_FILTER_NONE_NEW: if (snode->status == AMT_SOURCE_STATUS_NONE && snode->flags == AMT_SOURCE_NEW) rc = true; break; case AMT_FILTER_BOTH: if ((snode->status == AMT_SOURCE_STATUS_D_FWD || snode->status == AMT_SOURCE_STATUS_FWD) && snode->flags == AMT_SOURCE_OLD) rc = true; break; case AMT_FILTER_BOTH_NEW: if ((snode->status == AMT_SOURCE_STATUS_D_FWD || snode->status == AMT_SOURCE_STATUS_FWD) && snode->flags == AMT_SOURCE_NEW) rc = true; break; default: WARN_ON_ONCE(1); break; } return rc; } static struct amt_source_node *amt_lookup_src(struct amt_tunnel_list *tunnel, struct amt_group_node *gnode, enum amt_filter filter, union amt_addr *src) { u32 hash = amt_source_hash(tunnel, src); struct amt_source_node *snode; hlist_for_each_entry_rcu(snode, &gnode->sources[hash], node) if (amt_status_filter(snode, filter) && amt_addr_equal(&snode->source_addr, src)) return snode; return NULL; } static u32 amt_group_hash(struct amt_tunnel_list *tunnel, union amt_addr *group) { u32 hash = jhash(group, sizeof(*group), tunnel->amt->hash_seed); return reciprocal_scale(hash, tunnel->amt->hash_buckets); } static struct amt_group_node *amt_lookup_group(struct amt_tunnel_list *tunnel, union amt_addr *group, union amt_addr *host, bool v6) { u32 hash = amt_group_hash(tunnel, group); struct amt_group_node *gnode; hlist_for_each_entry_rcu(gnode, &tunnel->groups[hash], node) { if (amt_addr_equal(&gnode->group_addr, group) && amt_addr_equal(&gnode->host_addr, host) && gnode->v6 == v6) return gnode; } return NULL; } static void amt_destroy_source(struct amt_source_node *snode) { struct amt_group_node *gnode = snode->gnode; struct amt_tunnel_list *tunnel; tunnel = gnode->tunnel_list; if (!gnode->v6) { netdev_dbg(snode->gnode->amt->dev, "Delete source %pI4 from %pI4\n", &snode->source_addr.ip4, &gnode->group_addr.ip4); #if IS_ENABLED(CONFIG_IPV6) } else { netdev_dbg(snode->gnode->amt->dev, "Delete source %pI6 from %pI6\n", &snode->source_addr.ip6, &gnode->group_addr.ip6); #endif } cancel_delayed_work(&snode->source_timer); hlist_del_init_rcu(&snode->node); tunnel->nr_sources--; gnode->nr_sources--; spin_lock_bh(&source_gc_lock); hlist_add_head_rcu(&snode->node, &source_gc_list); spin_unlock_bh(&source_gc_lock); } static void amt_del_group(struct amt_dev *amt, struct amt_group_node *gnode) { struct amt_source_node *snode; struct hlist_node *t; int i; if (cancel_delayed_work(&gnode->group_timer)) dev_put(amt->dev); hlist_del_rcu(&gnode->node); gnode->tunnel_list->nr_groups--; if (!gnode->v6) netdev_dbg(amt->dev, "Leave group %pI4\n", &gnode->group_addr.ip4); #if IS_ENABLED(CONFIG_IPV6) else netdev_dbg(amt->dev, "Leave group %pI6\n", &gnode->group_addr.ip6); #endif for (i = 0; i < amt->hash_buckets; i++) hlist_for_each_entry_safe(snode, t, &gnode->sources[i], node) amt_destroy_source(snode); /* tunnel->lock was acquired outside of amt_del_group() * But rcu_read_lock() was acquired too so It's safe. */ kfree_rcu(gnode, rcu); } /* If a source timer expires with a router filter-mode for the group of * INCLUDE, the router concludes that traffic from this particular * source is no longer desired on the attached network, and deletes the * associated source record. 
*/ static void amt_source_work(struct work_struct *work) { struct amt_source_node *snode = container_of(to_delayed_work(work), struct amt_source_node, source_timer); struct amt_group_node *gnode = snode->gnode; struct amt_dev *amt = gnode->amt; struct amt_tunnel_list *tunnel; tunnel = gnode->tunnel_list; spin_lock_bh(&tunnel->lock); rcu_read_lock(); if (gnode->filter_mode == MCAST_INCLUDE) { amt_destroy_source(snode); if (!gnode->nr_sources) amt_del_group(amt, gnode); } else { /* When a router filter-mode for a group is EXCLUDE, * source records are only deleted when the group timer expires */ snode->status = AMT_SOURCE_STATUS_D_FWD; } rcu_read_unlock(); spin_unlock_bh(&tunnel->lock); } static void amt_act_src(struct amt_tunnel_list *tunnel, struct amt_group_node *gnode, struct amt_source_node *snode, enum amt_act act) { struct amt_dev *amt = tunnel->amt; switch (act) { case AMT_ACT_GMI: mod_delayed_work(amt_wq, &snode->source_timer, msecs_to_jiffies(amt_gmi(amt))); break; case AMT_ACT_GMI_ZERO: cancel_delayed_work(&snode->source_timer); break; case AMT_ACT_GT: mod_delayed_work(amt_wq, &snode->source_timer, gnode->group_timer.timer.expires); break; case AMT_ACT_STATUS_FWD_NEW: snode->status = AMT_SOURCE_STATUS_FWD; snode->flags = AMT_SOURCE_NEW; break; case AMT_ACT_STATUS_D_FWD_NEW: snode->status = AMT_SOURCE_STATUS_D_FWD; snode->flags = AMT_SOURCE_NEW; break; case AMT_ACT_STATUS_NONE_NEW: cancel_delayed_work(&snode->source_timer); snode->status = AMT_SOURCE_STATUS_NONE; snode->flags = AMT_SOURCE_NEW; break; default: WARN_ON_ONCE(1); return; } if (!gnode->v6) netdev_dbg(amt->dev, "Source %pI4 from %pI4 Acted %s\n", &snode->source_addr.ip4, &gnode->group_addr.ip4, action_str[act]); #if IS_ENABLED(CONFIG_IPV6) else netdev_dbg(amt->dev, "Source %pI6 from %pI6 Acted %s\n", &snode->source_addr.ip6, &gnode->group_addr.ip6, action_str[act]); #endif } static struct amt_source_node *amt_alloc_snode(struct amt_group_node *gnode, union amt_addr *src) { struct amt_source_node *snode; snode = kzalloc(sizeof(*snode), GFP_ATOMIC); if (!snode) return NULL; memcpy(&snode->source_addr, src, sizeof(union amt_addr)); snode->gnode = gnode; snode->status = AMT_SOURCE_STATUS_NONE; snode->flags = AMT_SOURCE_NEW; INIT_HLIST_NODE(&snode->node); INIT_DELAYED_WORK(&snode->source_timer, amt_source_work); return snode; } /* RFC 3810 - 7.2.2. Definition of Filter Timers * * Router Mode Filter Timer Actions/Comments * ----------- ----------------- ---------------- * * INCLUDE Not Used All listeners in * INCLUDE mode. * * EXCLUDE Timer > 0 At least one listener * in EXCLUDE mode. * * EXCLUDE Timer == 0 No more listeners in * EXCLUDE mode for the * multicast address. * If the Requested List * is empty, delete * Multicast Address * Record. If not, switch * to INCLUDE filter mode; * the sources in the * Requested List are * moved to the Include * List, and the Exclude * List is deleted. 
*/ static void amt_group_work(struct work_struct *work) { struct amt_group_node *gnode = container_of(to_delayed_work(work), struct amt_group_node, group_timer); struct amt_tunnel_list *tunnel = gnode->tunnel_list; struct amt_dev *amt = gnode->amt; struct amt_source_node *snode; bool delete_group = true; struct hlist_node *t; int i, buckets; buckets = amt->hash_buckets; spin_lock_bh(&tunnel->lock); if (gnode->filter_mode == MCAST_INCLUDE) { /* Not Used */ spin_unlock_bh(&tunnel->lock); goto out; } rcu_read_lock(); for (i = 0; i < buckets; i++) { hlist_for_each_entry_safe(snode, t, &gnode->sources[i], node) { if (!delayed_work_pending(&snode->source_timer) || snode->status == AMT_SOURCE_STATUS_D_FWD) { amt_destroy_source(snode); } else { delete_group = false; snode->status = AMT_SOURCE_STATUS_FWD; } } } if (delete_group) amt_del_group(amt, gnode); else gnode->filter_mode = MCAST_INCLUDE; rcu_read_unlock(); spin_unlock_bh(&tunnel->lock); out: dev_put(amt->dev); } /* Non-existent group is created as INCLUDE {empty}: * * RFC 3376 - 5.1. Action on Change of Interface State * * If no interface state existed for that multicast address before * the change (i.e., the change consisted of creating a new * per-interface record), or if no state exists after the change * (i.e., the change consisted of deleting a per-interface record), * then the "non-existent" state is considered to have a filter mode * of INCLUDE and an empty source list. */ static struct amt_group_node *amt_add_group(struct amt_dev *amt, struct amt_tunnel_list *tunnel, union amt_addr *group, union amt_addr *host, bool v6) { struct amt_group_node *gnode; u32 hash; int i; if (tunnel->nr_groups >= amt->max_groups) return ERR_PTR(-ENOSPC); gnode = kzalloc(sizeof(*gnode) + (sizeof(struct hlist_head) * amt->hash_buckets), GFP_ATOMIC); if (unlikely(!gnode)) return ERR_PTR(-ENOMEM); gnode->amt = amt; gnode->group_addr = *group; gnode->host_addr = *host; gnode->v6 = v6; gnode->tunnel_list = tunnel; gnode->filter_mode = MCAST_INCLUDE; INIT_HLIST_NODE(&gnode->node); INIT_DELAYED_WORK(&gnode->group_timer, amt_group_work); for (i = 0; i < amt->hash_buckets; i++) INIT_HLIST_HEAD(&gnode->sources[i]); hash = amt_group_hash(tunnel, group); hlist_add_head_rcu(&gnode->node, &tunnel->groups[hash]); tunnel->nr_groups++; if (!gnode->v6) netdev_dbg(amt->dev, "Join group %pI4\n", &gnode->group_addr.ip4); #if IS_ENABLED(CONFIG_IPV6) else netdev_dbg(amt->dev, "Join group %pI6\n", &gnode->group_addr.ip6); #endif return gnode; } static struct sk_buff *amt_build_igmp_gq(struct amt_dev *amt) { u8 ra[AMT_IPHDR_OPTS] = { IPOPT_RA, 4, 0, 0 }; int hlen = LL_RESERVED_SPACE(amt->dev); int tlen = amt->dev->needed_tailroom; struct igmpv3_query *ihv3; void *csum_start = NULL; __sum16 *csum = NULL; struct sk_buff *skb; struct ethhdr *eth; struct iphdr *iph; unsigned int len; int offset; len = hlen + tlen + sizeof(*iph) + AMT_IPHDR_OPTS + sizeof(*ihv3); skb = netdev_alloc_skb_ip_align(amt->dev, len); if (!skb) return NULL; skb_reserve(skb, hlen); skb_push(skb, sizeof(*eth)); skb->protocol = htons(ETH_P_IP); skb_reset_mac_header(skb); skb->priority = TC_PRIO_CONTROL; skb_put(skb, sizeof(*iph)); skb_put_data(skb, ra, sizeof(ra)); skb_put(skb, sizeof(*ihv3)); skb_pull(skb, sizeof(*eth)); skb_reset_network_header(skb); iph = ip_hdr(skb); iph->version = 4; iph->ihl = (sizeof(struct iphdr) + AMT_IPHDR_OPTS) >> 2; iph->tos = AMT_TOS; iph->tot_len = htons(sizeof(*iph) + AMT_IPHDR_OPTS + sizeof(*ihv3)); iph->frag_off = htons(IP_DF); iph->ttl = 1; iph->id = 0; iph->protocol = 
IPPROTO_IGMP; iph->daddr = htonl(INADDR_ALLHOSTS_GROUP); iph->saddr = htonl(INADDR_ANY); ip_send_check(iph); eth = eth_hdr(skb); ether_addr_copy(eth->h_source, amt->dev->dev_addr); ip_eth_mc_map(htonl(INADDR_ALLHOSTS_GROUP), eth->h_dest); eth->h_proto = htons(ETH_P_IP); ihv3 = skb_pull(skb, sizeof(*iph) + AMT_IPHDR_OPTS); skb_reset_transport_header(skb); ihv3->type = IGMP_HOST_MEMBERSHIP_QUERY; ihv3->code = 1; ihv3->group = 0; ihv3->qqic = amt->qi; ihv3->nsrcs = 0; ihv3->resv = 0; ihv3->suppress = false; ihv3->qrv = READ_ONCE(amt->net->ipv4.sysctl_igmp_qrv); ihv3->csum = 0; csum = &ihv3->csum; csum_start = (void *)ihv3; *csum = ip_compute_csum(csum_start, sizeof(*ihv3)); offset = skb_transport_offset(skb); skb->csum = skb_checksum(skb, offset, skb->len - offset, 0); skb->ip_summed = CHECKSUM_NONE; skb_push(skb, sizeof(*eth) + sizeof(*iph) + AMT_IPHDR_OPTS); return skb; } static void amt_update_gw_status(struct amt_dev *amt, enum amt_status status, bool validate) { if (validate && amt->status >= status) return; netdev_dbg(amt->dev, "Update GW status %s -> %s", status_str[amt->status], status_str[status]); WRITE_ONCE(amt->status, status); } static void __amt_update_relay_status(struct amt_tunnel_list *tunnel, enum amt_status status, bool validate) { if (validate && tunnel->status >= status) return; netdev_dbg(tunnel->amt->dev, "Update Tunnel(IP = %pI4, PORT = %u) status %s -> %s", &tunnel->ip4, ntohs(tunnel->source_port), status_str[tunnel->status], status_str[status]); tunnel->status = status; } static void amt_update_relay_status(struct amt_tunnel_list *tunnel, enum amt_status status, bool validate) { spin_lock_bh(&tunnel->lock); __amt_update_relay_status(tunnel, status, validate); spin_unlock_bh(&tunnel->lock); } static void amt_send_discovery(struct amt_dev *amt) { struct amt_header_discovery *amtd; int hlen, tlen, offset; struct socket *sock; struct udphdr *udph; struct sk_buff *skb; struct iphdr *iph; struct rtable *rt; struct flowi4 fl4; u32 len; int err; rcu_read_lock(); sock = rcu_dereference(amt->sock); if (!sock) goto out; if (!netif_running(amt->stream_dev) || !netif_running(amt->dev)) goto out; rt = ip_route_output_ports(amt->net, &fl4, sock->sk, amt->discovery_ip, amt->local_ip, amt->gw_port, amt->relay_port, IPPROTO_UDP, 0, amt->stream_dev->ifindex); if (IS_ERR(rt)) { amt->dev->stats.tx_errors++; goto out; } hlen = LL_RESERVED_SPACE(amt->dev); tlen = amt->dev->needed_tailroom; len = hlen + tlen + sizeof(*iph) + sizeof(*udph) + sizeof(*amtd); skb = netdev_alloc_skb_ip_align(amt->dev, len); if (!skb) { ip_rt_put(rt); amt->dev->stats.tx_errors++; goto out; } skb->priority = TC_PRIO_CONTROL; skb_dst_set(skb, &rt->dst); len = sizeof(*iph) + sizeof(*udph) + sizeof(*amtd); skb_reset_network_header(skb); skb_put(skb, len); amtd = skb_pull(skb, sizeof(*iph) + sizeof(*udph)); amtd->version = 0; amtd->type = AMT_MSG_DISCOVERY; amtd->reserved = 0; amtd->nonce = amt->nonce; skb_push(skb, sizeof(*udph)); skb_reset_transport_header(skb); udph = udp_hdr(skb); udph->source = amt->gw_port; udph->dest = amt->relay_port; udph->len = htons(sizeof(*udph) + sizeof(*amtd)); udph->check = 0; offset = skb_transport_offset(skb); skb->csum = skb_checksum(skb, offset, skb->len - offset, 0); udph->check = csum_tcpudp_magic(amt->local_ip, amt->discovery_ip, sizeof(*udph) + sizeof(*amtd), IPPROTO_UDP, skb->csum); skb_push(skb, sizeof(*iph)); iph = ip_hdr(skb); iph->version = 4; iph->ihl = (sizeof(struct iphdr)) >> 2; iph->tos = AMT_TOS; iph->frag_off = 0; iph->ttl = ip4_dst_hoplimit(&rt->dst); iph->daddr = 
amt->discovery_ip; iph->saddr = amt->local_ip; iph->protocol = IPPROTO_UDP; iph->tot_len = htons(len); skb->ip_summed = CHECKSUM_NONE; ip_select_ident(amt->net, skb, NULL); ip_send_check(iph); err = ip_local_out(amt->net, sock->sk, skb); if (unlikely(net_xmit_eval(err))) amt->dev->stats.tx_errors++; amt_update_gw_status(amt, AMT_STATUS_SENT_DISCOVERY, true); out: rcu_read_unlock(); } static void amt_send_request(struct amt_dev *amt, bool v6) { struct amt_header_request *amtrh; int hlen, tlen, offset; struct socket *sock; struct udphdr *udph; struct sk_buff *skb; struct iphdr *iph; struct rtable *rt; struct flowi4 fl4; u32 len; int err; rcu_read_lock(); sock = rcu_dereference(amt->sock); if (!sock) goto out; if (!netif_running(amt->stream_dev) || !netif_running(amt->dev)) goto out; rt = ip_route_output_ports(amt->net, &fl4, sock->sk, amt->remote_ip, amt->local_ip, amt->gw_port, amt->relay_port, IPPROTO_UDP, 0, amt->stream_dev->ifindex); if (IS_ERR(rt)) { amt->dev->stats.tx_errors++; goto out; } hlen = LL_RESERVED_SPACE(amt->dev); tlen = amt->dev->needed_tailroom; len = hlen + tlen + sizeof(*iph) + sizeof(*udph) + sizeof(*amtrh); skb = netdev_alloc_skb_ip_align(amt->dev, len); if (!skb) { ip_rt_put(rt); amt->dev->stats.tx_errors++; goto out; } skb->priority = TC_PRIO_CONTROL; skb_dst_set(skb, &rt->dst); len = sizeof(*iph) + sizeof(*udph) + sizeof(*amtrh); skb_reset_network_header(skb); skb_put(skb, len); amtrh = skb_pull(skb, sizeof(*iph) + sizeof(*udph)); amtrh->version = 0; amtrh->type = AMT_MSG_REQUEST; amtrh->reserved1 = 0; amtrh->p = v6; amtrh->reserved2 = 0; amtrh->nonce = amt->nonce; skb_push(skb, sizeof(*udph)); skb_reset_transport_header(skb); udph = udp_hdr(skb); udph->source = amt->gw_port; udph->dest = amt->relay_port; udph->len = htons(sizeof(*amtrh) + sizeof(*udph)); udph->check = 0; offset = skb_transport_offset(skb); skb->csum = skb_checksum(skb, offset, skb->len - offset, 0); udph->check = csum_tcpudp_magic(amt->local_ip, amt->remote_ip, sizeof(*udph) + sizeof(*amtrh), IPPROTO_UDP, skb->csum); skb_push(skb, sizeof(*iph)); iph = ip_hdr(skb); iph->version = 4; iph->ihl = (sizeof(struct iphdr)) >> 2; iph->tos = AMT_TOS; iph->frag_off = 0; iph->ttl = ip4_dst_hoplimit(&rt->dst); iph->daddr = amt->remote_ip; iph->saddr = amt->local_ip; iph->protocol = IPPROTO_UDP; iph->tot_len = htons(len); skb->ip_summed = CHECKSUM_NONE; ip_select_ident(amt->net, skb, NULL); ip_send_check(iph); err = ip_local_out(amt->net, sock->sk, skb); if (unlikely(net_xmit_eval(err))) amt->dev->stats.tx_errors++; out: rcu_read_unlock(); } static void amt_send_igmp_gq(struct amt_dev *amt, struct amt_tunnel_list *tunnel) { struct sk_buff *skb; skb = amt_build_igmp_gq(amt); if (!skb) return; amt_skb_cb(skb)->tunnel = tunnel; dev_queue_xmit(skb); } #if IS_ENABLED(CONFIG_IPV6) static struct sk_buff *amt_build_mld_gq(struct amt_dev *amt) { u8 ra[AMT_IP6HDR_OPTS] = { IPPROTO_ICMPV6, 0, IPV6_TLV_ROUTERALERT, 2, 0, 0, IPV6_TLV_PAD1, IPV6_TLV_PAD1 }; int hlen = LL_RESERVED_SPACE(amt->dev); int tlen = amt->dev->needed_tailroom; struct mld2_query *mld2q; void *csum_start = NULL; struct ipv6hdr *ip6h; struct sk_buff *skb; struct ethhdr *eth; u32 len; len = hlen + tlen + sizeof(*ip6h) + sizeof(ra) + sizeof(*mld2q); skb = netdev_alloc_skb_ip_align(amt->dev, len); if (!skb) return NULL; skb_reserve(skb, hlen); skb_push(skb, sizeof(*eth)); skb_reset_mac_header(skb); eth = eth_hdr(skb); skb->priority = TC_PRIO_CONTROL; skb->protocol = htons(ETH_P_IPV6); skb_put_zero(skb, sizeof(*ip6h)); skb_put_data(skb, ra, sizeof(ra)); 
skb_put_zero(skb, sizeof(*mld2q)); skb_pull(skb, sizeof(*eth)); skb_reset_network_header(skb); ip6h = ipv6_hdr(skb); ip6h->payload_len = htons(sizeof(ra) + sizeof(*mld2q)); ip6h->nexthdr = NEXTHDR_HOP; ip6h->hop_limit = 1; ip6h->daddr = mld2_all_node; ip6_flow_hdr(ip6h, 0, 0); if (ipv6_dev_get_saddr(amt->net, amt->dev, &ip6h->daddr, 0, &ip6h->saddr)) { amt->dev->stats.tx_errors++; kfree_skb(skb); return NULL; } eth->h_proto = htons(ETH_P_IPV6); ether_addr_copy(eth->h_source, amt->dev->dev_addr); ipv6_eth_mc_map(&mld2_all_node, eth->h_dest); skb_pull(skb, sizeof(*ip6h) + sizeof(ra)); skb_reset_transport_header(skb); mld2q = (struct mld2_query *)icmp6_hdr(skb); mld2q->mld2q_mrc = htons(1); mld2q->mld2q_type = ICMPV6_MGM_QUERY; mld2q->mld2q_code = 0; mld2q->mld2q_cksum = 0; mld2q->mld2q_resv1 = 0; mld2q->mld2q_resv2 = 0; mld2q->mld2q_suppress = 0; mld2q->mld2q_qrv = amt->qrv; mld2q->mld2q_nsrcs = 0; mld2q->mld2q_qqic = amt->qi; csum_start = (void *)mld2q; mld2q->mld2q_cksum = csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr, sizeof(*mld2q), IPPROTO_ICMPV6, csum_partial(csum_start, sizeof(*mld2q), 0)); skb->ip_summed = CHECKSUM_NONE; skb_push(skb, sizeof(*eth) + sizeof(*ip6h) + sizeof(ra)); return skb; } static void amt_send_mld_gq(struct amt_dev *amt, struct amt_tunnel_list *tunnel) { struct sk_buff *skb; skb = amt_build_mld_gq(amt); if (!skb) return; amt_skb_cb(skb)->tunnel = tunnel; dev_queue_xmit(skb); } #else static void amt_send_mld_gq(struct amt_dev *amt, struct amt_tunnel_list *tunnel) { } #endif static bool amt_queue_event(struct amt_dev *amt, enum amt_event event, struct sk_buff *skb) { int index; spin_lock_bh(&amt->lock); if (amt->nr_events >= AMT_MAX_EVENTS) { spin_unlock_bh(&amt->lock); return 1; } index = (amt->event_idx + amt->nr_events) % AMT_MAX_EVENTS; amt->events[index].event = event; amt->events[index].skb = skb; amt->nr_events++; amt->event_idx %= AMT_MAX_EVENTS; queue_work(amt_wq, &amt->event_wq); spin_unlock_bh(&amt->lock); return 0; } static void amt_secret_work(struct work_struct *work) { struct amt_dev *amt = container_of(to_delayed_work(work), struct amt_dev, secret_wq); spin_lock_bh(&amt->lock); get_random_bytes(&amt->key, sizeof(siphash_key_t)); spin_unlock_bh(&amt->lock); mod_delayed_work(amt_wq, &amt->secret_wq, msecs_to_jiffies(AMT_SECRET_TIMEOUT)); } static void amt_event_send_discovery(struct amt_dev *amt) { if (amt->status > AMT_STATUS_SENT_DISCOVERY) goto out; get_random_bytes(&amt->nonce, sizeof(__be32)); amt_send_discovery(amt); out: mod_delayed_work(amt_wq, &amt->discovery_wq, msecs_to_jiffies(AMT_DISCOVERY_TIMEOUT)); } static void amt_discovery_work(struct work_struct *work) { struct amt_dev *amt = container_of(to_delayed_work(work), struct amt_dev, discovery_wq); if (amt_queue_event(amt, AMT_EVENT_SEND_DISCOVERY, NULL)) mod_delayed_work(amt_wq, &amt->discovery_wq, msecs_to_jiffies(AMT_DISCOVERY_TIMEOUT)); } static void amt_event_send_request(struct amt_dev *amt) { u32 exp; if (amt->status < AMT_STATUS_RECEIVED_ADVERTISEMENT) goto out; if (amt->req_cnt > AMT_MAX_REQ_COUNT) { netdev_dbg(amt->dev, "Gateway is not ready"); amt->qi = AMT_INIT_REQ_TIMEOUT; WRITE_ONCE(amt->ready4, false); WRITE_ONCE(amt->ready6, false); amt->remote_ip = 0; amt_update_gw_status(amt, AMT_STATUS_INIT, false); amt->req_cnt = 0; amt->nonce = 0; goto out; } if (!amt->req_cnt) { WRITE_ONCE(amt->ready4, false); WRITE_ONCE(amt->ready6, false); get_random_bytes(&amt->nonce, sizeof(__be32)); } amt_send_request(amt, false); amt_send_request(amt, true); amt_update_gw_status(amt, 
AMT_STATUS_SENT_REQUEST, true); amt->req_cnt++; out: exp = min_t(u32, (1 * (1 << amt->req_cnt)), AMT_MAX_REQ_TIMEOUT); mod_delayed_work(amt_wq, &amt->req_wq, msecs_to_jiffies(exp * 1000)); } static void amt_req_work(struct work_struct *work) { struct amt_dev *amt = container_of(to_delayed_work(work), struct amt_dev, req_wq); if (amt_queue_event(amt, AMT_EVENT_SEND_REQUEST, NULL)) mod_delayed_work(amt_wq, &amt->req_wq, msecs_to_jiffies(100)); } static bool amt_send_membership_update(struct amt_dev *amt, struct sk_buff *skb, bool v6) { struct amt_header_membership_update *amtmu; struct socket *sock; struct iphdr *iph; struct flowi4 fl4; struct rtable *rt; int err; sock = rcu_dereference_bh(amt->sock); if (!sock) return true; err = skb_cow_head(skb, LL_RESERVED_SPACE(amt->dev) + sizeof(*amtmu) + sizeof(*iph) + sizeof(struct udphdr)); if (err) return true; skb_reset_inner_headers(skb); memset(&fl4, 0, sizeof(struct flowi4)); fl4.flowi4_oif = amt->stream_dev->ifindex; fl4.daddr = amt->remote_ip; fl4.saddr = amt->local_ip; fl4.flowi4_tos = AMT_TOS; fl4.flowi4_proto = IPPROTO_UDP; rt = ip_route_output_key(amt->net, &fl4); if (IS_ERR(rt)) { netdev_dbg(amt->dev, "no route to %pI4\n", &amt->remote_ip); return true; } amtmu = skb_push(skb, sizeof(*amtmu)); amtmu->version = 0; amtmu->type = AMT_MSG_MEMBERSHIP_UPDATE; amtmu->reserved = 0; amtmu->nonce = amt->nonce; amtmu->response_mac = amt->mac; if (!v6) skb_set_inner_protocol(skb, htons(ETH_P_IP)); else skb_set_inner_protocol(skb, htons(ETH_P_IPV6)); udp_tunnel_xmit_skb(rt, sock->sk, skb, fl4.saddr, fl4.daddr, AMT_TOS, ip4_dst_hoplimit(&rt->dst), 0, amt->gw_port, amt->relay_port, false, false); amt_update_gw_status(amt, AMT_STATUS_SENT_UPDATE, true); return false; } static void amt_send_multicast_data(struct amt_dev *amt, const struct sk_buff *oskb, struct amt_tunnel_list *tunnel, bool v6) { struct amt_header_mcast_data *amtmd; struct socket *sock; struct sk_buff *skb; struct iphdr *iph; struct flowi4 fl4; struct rtable *rt; sock = rcu_dereference_bh(amt->sock); if (!sock) return; skb = skb_copy_expand(oskb, sizeof(*amtmd) + sizeof(*iph) + sizeof(struct udphdr), 0, GFP_ATOMIC); if (!skb) return; skb_reset_inner_headers(skb); memset(&fl4, 0, sizeof(struct flowi4)); fl4.flowi4_oif = amt->stream_dev->ifindex; fl4.daddr = tunnel->ip4; fl4.saddr = amt->local_ip; fl4.flowi4_proto = IPPROTO_UDP; rt = ip_route_output_key(amt->net, &fl4); if (IS_ERR(rt)) { netdev_dbg(amt->dev, "no route to %pI4\n", &tunnel->ip4); kfree_skb(skb); return; } amtmd = skb_push(skb, sizeof(*amtmd)); amtmd->version = 0; amtmd->reserved = 0; amtmd->type = AMT_MSG_MULTICAST_DATA; if (!v6) skb_set_inner_protocol(skb, htons(ETH_P_IP)); else skb_set_inner_protocol(skb, htons(ETH_P_IPV6)); udp_tunnel_xmit_skb(rt, sock->sk, skb, fl4.saddr, fl4.daddr, AMT_TOS, ip4_dst_hoplimit(&rt->dst), 0, amt->relay_port, tunnel->source_port, false, false); } static bool amt_send_membership_query(struct amt_dev *amt, struct sk_buff *skb, struct amt_tunnel_list *tunnel, bool v6) { struct amt_header_membership_query *amtmq; struct socket *sock; struct rtable *rt; struct flowi4 fl4; int err; sock = rcu_dereference_bh(amt->sock); if (!sock) return true; err = skb_cow_head(skb, LL_RESERVED_SPACE(amt->dev) + sizeof(*amtmq) + sizeof(struct iphdr) + sizeof(struct udphdr)); if (err) return true; skb_reset_inner_headers(skb); memset(&fl4, 0, sizeof(struct flowi4)); fl4.flowi4_oif = amt->stream_dev->ifindex; fl4.daddr = tunnel->ip4; fl4.saddr = amt->local_ip; fl4.flowi4_tos = AMT_TOS; fl4.flowi4_proto = IPPROTO_UDP; 
rt = ip_route_output_key(amt->net, &fl4); if (IS_ERR(rt)) { netdev_dbg(amt->dev, "no route to %pI4\n", &tunnel->ip4); return true; } amtmq = skb_push(skb, sizeof(*amtmq)); amtmq->version = 0; amtmq->type = AMT_MSG_MEMBERSHIP_QUERY; amtmq->reserved = 0; amtmq->l = 0; amtmq->g = 0; amtmq->nonce = tunnel->nonce; amtmq->response_mac = tunnel->mac; if (!v6) skb_set_inner_protocol(skb, htons(ETH_P_IP)); else skb_set_inner_protocol(skb, htons(ETH_P_IPV6)); udp_tunnel_xmit_skb(rt, sock->sk, skb, fl4.saddr, fl4.daddr, AMT_TOS, ip4_dst_hoplimit(&rt->dst), 0, amt->relay_port, tunnel->source_port, false, false); amt_update_relay_status(tunnel, AMT_STATUS_SENT_QUERY, true); return false; } static netdev_tx_t amt_dev_xmit(struct sk_buff *skb, struct net_device *dev) { struct amt_dev *amt = netdev_priv(dev); struct amt_tunnel_list *tunnel; struct amt_group_node *gnode; union amt_addr group = {0,}; #if IS_ENABLED(CONFIG_IPV6) struct ipv6hdr *ip6h; struct mld_msg *mld; #endif bool report = false; struct igmphdr *ih; bool query = false; struct iphdr *iph; bool data = false; bool v6 = false; u32 hash; iph = ip_hdr(skb); if (iph->version == 4) { if (!ipv4_is_multicast(iph->daddr)) goto free; if (!ip_mc_check_igmp(skb)) { ih = igmp_hdr(skb); switch (ih->type) { case IGMPV3_HOST_MEMBERSHIP_REPORT: case IGMP_HOST_MEMBERSHIP_REPORT: report = true; break; case IGMP_HOST_MEMBERSHIP_QUERY: query = true; break; default: goto free; } } else { data = true; } v6 = false; group.ip4 = iph->daddr; #if IS_ENABLED(CONFIG_IPV6) } else if (iph->version == 6) { ip6h = ipv6_hdr(skb); if (!ipv6_addr_is_multicast(&ip6h->daddr)) goto free; if (!ipv6_mc_check_mld(skb)) { mld = (struct mld_msg *)skb_transport_header(skb); switch (mld->mld_type) { case ICMPV6_MGM_REPORT: case ICMPV6_MLD2_REPORT: report = true; break; case ICMPV6_MGM_QUERY: query = true; break; default: goto free; } } else { data = true; } v6 = true; group.ip6 = ip6h->daddr; #endif } else { dev->stats.tx_errors++; goto free; } if (!pskb_may_pull(skb, sizeof(struct ethhdr))) goto free; skb_pull(skb, sizeof(struct ethhdr)); if (amt->mode == AMT_MODE_GATEWAY) { /* Gateway only passes IGMP/MLD packets */ if (!report) goto free; if ((!v6 && !READ_ONCE(amt->ready4)) || (v6 && !READ_ONCE(amt->ready6))) goto free; if (amt_send_membership_update(amt, skb, v6)) goto free; goto unlock; } else if (amt->mode == AMT_MODE_RELAY) { if (query) { tunnel = amt_skb_cb(skb)->tunnel; if (!tunnel) { WARN_ON(1); goto free; } /* Do not forward unexpected query */ if (amt_send_membership_query(amt, skb, tunnel, v6)) goto free; goto unlock; } if (!data) goto free; list_for_each_entry_rcu(tunnel, &amt->tunnel_list, list) { hash = amt_group_hash(tunnel, &group); hlist_for_each_entry_rcu(gnode, &tunnel->groups[hash], node) { if (!v6) { if (gnode->group_addr.ip4 == iph->daddr) goto found; #if IS_ENABLED(CONFIG_IPV6) } else { if (ipv6_addr_equal(&gnode->group_addr.ip6, &ip6h->daddr)) goto found; #endif } } continue; found: amt_send_multicast_data(amt, skb, tunnel, v6); } } dev_kfree_skb(skb); return NETDEV_TX_OK; free: dev_kfree_skb(skb); unlock: dev->stats.tx_dropped++; return NETDEV_TX_OK; } static int amt_parse_type(struct sk_buff *skb) { struct amt_header *amth; if (!pskb_may_pull(skb, sizeof(struct udphdr) + sizeof(struct amt_header))) return -1; amth = (struct amt_header *)(udp_hdr(skb) + 1); if (amth->version != 0) return -1; if (amth->type >= __AMT_MSG_MAX || !amth->type) return -1; return amth->type; } static void amt_clear_groups(struct amt_tunnel_list *tunnel) { struct amt_dev *amt = 
tunnel->amt; struct amt_group_node *gnode; struct hlist_node *t; int i; spin_lock_bh(&tunnel->lock); rcu_read_lock(); for (i = 0; i < amt->hash_buckets; i++) hlist_for_each_entry_safe(gnode, t, &tunnel->groups[i], node) amt_del_group(amt, gnode); rcu_read_unlock(); spin_unlock_bh(&tunnel->lock); } static void amt_tunnel_expire(struct work_struct *work) { struct amt_tunnel_list *tunnel = container_of(to_delayed_work(work), struct amt_tunnel_list, gc_wq); struct amt_dev *amt = tunnel->amt; spin_lock_bh(&amt->lock); rcu_read_lock(); list_del_rcu(&tunnel->list); amt->nr_tunnels--; amt_clear_groups(tunnel); rcu_read_unlock(); spin_unlock_bh(&amt->lock); kfree_rcu(tunnel, rcu); } static void amt_cleanup_srcs(struct amt_dev *amt, struct amt_tunnel_list *tunnel, struct amt_group_node *gnode) { struct amt_source_node *snode; struct hlist_node *t; int i; /* Delete old sources */ for (i = 0; i < amt->hash_buckets; i++) { hlist_for_each_entry_safe(snode, t, &gnode->sources[i], node) { if (snode->flags == AMT_SOURCE_OLD) amt_destroy_source(snode); } } /* switch from new to old */ for (i = 0; i < amt->hash_buckets; i++) { hlist_for_each_entry_rcu(snode, &gnode->sources[i], node) { snode->flags = AMT_SOURCE_OLD; if (!gnode->v6) netdev_dbg(snode->gnode->amt->dev, "Add source as OLD %pI4 from %pI4\n", &snode->source_addr.ip4, &gnode->group_addr.ip4); #if IS_ENABLED(CONFIG_IPV6) else netdev_dbg(snode->gnode->amt->dev, "Add source as OLD %pI6 from %pI6\n", &snode->source_addr.ip6, &gnode->group_addr.ip6); #endif } } } static void amt_add_srcs(struct amt_dev *amt, struct amt_tunnel_list *tunnel, struct amt_group_node *gnode, void *grec, bool v6) { struct igmpv3_grec *igmp_grec; struct amt_source_node *snode; #if IS_ENABLED(CONFIG_IPV6) struct mld2_grec *mld_grec; #endif union amt_addr src = {0,}; u16 nsrcs; u32 hash; int i; if (!v6) { igmp_grec = grec; nsrcs = ntohs(igmp_grec->grec_nsrcs); } else { #if IS_ENABLED(CONFIG_IPV6) mld_grec = grec; nsrcs = ntohs(mld_grec->grec_nsrcs); #else return; #endif } for (i = 0; i < nsrcs; i++) { if (tunnel->nr_sources >= amt->max_sources) return; if (!v6) src.ip4 = igmp_grec->grec_src[i]; #if IS_ENABLED(CONFIG_IPV6) else memcpy(&src.ip6, &mld_grec->grec_src[i], sizeof(struct in6_addr)); #endif if (amt_lookup_src(tunnel, gnode, AMT_FILTER_ALL, &src)) continue; snode = amt_alloc_snode(gnode, &src); if (snode) { hash = amt_source_hash(tunnel, &snode->source_addr); hlist_add_head_rcu(&snode->node, &gnode->sources[hash]); tunnel->nr_sources++; gnode->nr_sources++; if (!gnode->v6) netdev_dbg(snode->gnode->amt->dev, "Add source as NEW %pI4 from %pI4\n", &snode->source_addr.ip4, &gnode->group_addr.ip4); #if IS_ENABLED(CONFIG_IPV6) else netdev_dbg(snode->gnode->amt->dev, "Add source as NEW %pI6 from %pI6\n", &snode->source_addr.ip6, &gnode->group_addr.ip6); #endif } } } /* Router State Report Rec'd New Router State * ------------ ------------ ---------------- * EXCLUDE (X,Y) IS_IN (A) EXCLUDE (X+A,Y-A) * * -----------+-----------+-----------+ * | OLD | NEW | * -----------+-----------+-----------+ * FWD | X | X+A | * -----------+-----------+-----------+ * D_FWD | Y | Y-A | * -----------+-----------+-----------+ * NONE | | A | * -----------+-----------+-----------+ * * a) Received sources are NONE/NEW * b) All NONE will be deleted by amt_cleanup_srcs(). * c) All OLD will be deleted by amt_cleanup_srcs(). * d) After delete, NEW source will be switched to OLD. 
*/ static void amt_lookup_act_srcs(struct amt_tunnel_list *tunnel, struct amt_group_node *gnode, void *grec, enum amt_ops ops, enum amt_filter filter, enum amt_act act, bool v6) { struct amt_dev *amt = tunnel->amt; struct amt_source_node *snode; struct igmpv3_grec *igmp_grec; #if IS_ENABLED(CONFIG_IPV6) struct mld2_grec *mld_grec; #endif union amt_addr src = {0,}; struct hlist_node *t; u16 nsrcs; int i, j; if (!v6) { igmp_grec = grec; nsrcs = ntohs(igmp_grec->grec_nsrcs); } else { #if IS_ENABLED(CONFIG_IPV6) mld_grec = grec; nsrcs = ntohs(mld_grec->grec_nsrcs); #else return; #endif } memset(&src, 0, sizeof(union amt_addr)); switch (ops) { case AMT_OPS_INT: /* A*B */ for (i = 0; i < nsrcs; i++) { if (!v6) src.ip4 = igmp_grec->grec_src[i]; #if IS_ENABLED(CONFIG_IPV6) else memcpy(&src.ip6, &mld_grec->grec_src[i], sizeof(struct in6_addr)); #endif snode = amt_lookup_src(tunnel, gnode, filter, &src); if (!snode) continue; amt_act_src(tunnel, gnode, snode, act); } break; case AMT_OPS_UNI: /* A+B */ for (i = 0; i < amt->hash_buckets; i++) { hlist_for_each_entry_safe(snode, t, &gnode->sources[i], node) { if (amt_status_filter(snode, filter)) amt_act_src(tunnel, gnode, snode, act); } } for (i = 0; i < nsrcs; i++) { if (!v6) src.ip4 = igmp_grec->grec_src[i]; #if IS_ENABLED(CONFIG_IPV6) else memcpy(&src.ip6, &mld_grec->grec_src[i], sizeof(struct in6_addr)); #endif snode = amt_lookup_src(tunnel, gnode, filter, &src); if (!snode) continue; amt_act_src(tunnel, gnode, snode, act); } break; case AMT_OPS_SUB: /* A-B */ for (i = 0; i < amt->hash_buckets; i++) { hlist_for_each_entry_safe(snode, t, &gnode->sources[i], node) { if (!amt_status_filter(snode, filter)) continue; for (j = 0; j < nsrcs; j++) { if (!v6) src.ip4 = igmp_grec->grec_src[j]; #if IS_ENABLED(CONFIG_IPV6) else memcpy(&src.ip6, &mld_grec->grec_src[j], sizeof(struct in6_addr)); #endif if (amt_addr_equal(&snode->source_addr, &src)) goto out_sub; } amt_act_src(tunnel, gnode, snode, act); continue; out_sub:; } } break; case AMT_OPS_SUB_REV: /* B-A */ for (i = 0; i < nsrcs; i++) { if (!v6) src.ip4 = igmp_grec->grec_src[i]; #if IS_ENABLED(CONFIG_IPV6) else memcpy(&src.ip6, &mld_grec->grec_src[i], sizeof(struct in6_addr)); #endif snode = amt_lookup_src(tunnel, gnode, AMT_FILTER_ALL, &src); if (!snode) { snode = amt_lookup_src(tunnel, gnode, filter, &src); if (snode) amt_act_src(tunnel, gnode, snode, act); } } break; default: netdev_dbg(amt->dev, "Invalid type\n"); return; } } static void amt_mcast_is_in_handler(struct amt_dev *amt, struct amt_tunnel_list *tunnel, struct amt_group_node *gnode, void *grec, void *zero_grec, bool v6) { if (gnode->filter_mode == MCAST_INCLUDE) { /* Router State Report Rec'd New Router State Actions * ------------ ------------ ---------------- ------- * INCLUDE (A) IS_IN (B) INCLUDE (A+B) (B)=GMI */ /* Update IS_IN (B) as FWD/NEW */ amt_lookup_act_srcs(tunnel, gnode, grec, AMT_OPS_UNI, AMT_FILTER_NONE_NEW, AMT_ACT_STATUS_FWD_NEW, v6); /* Update INCLUDE (A) as NEW */ amt_lookup_act_srcs(tunnel, gnode, grec, AMT_OPS_UNI, AMT_FILTER_FWD, AMT_ACT_STATUS_FWD_NEW, v6); /* (B)=GMI */ amt_lookup_act_srcs(tunnel, gnode, grec, AMT_OPS_INT, AMT_FILTER_FWD_NEW, AMT_ACT_GMI, v6); } else { /* State Actions * ------------ ------------ ---------------- ------- * EXCLUDE (X,Y) IS_IN (A) EXCLUDE (X+A,Y-A) (A)=GMI */ /* Update (A) in (X, Y) as NONE/NEW */ amt_lookup_act_srcs(tunnel, gnode, grec, AMT_OPS_INT, AMT_FILTER_BOTH, AMT_ACT_STATUS_NONE_NEW, v6); /* Update FWD/OLD as FWD/NEW */ amt_lookup_act_srcs(tunnel, gnode, zero_grec, 
AMT_OPS_UNI, AMT_FILTER_FWD, AMT_ACT_STATUS_FWD_NEW, v6); /* Update IS_IN (A) as FWD/NEW */ amt_lookup_act_srcs(tunnel, gnode, grec, AMT_OPS_INT, AMT_FILTER_NONE_NEW, AMT_ACT_STATUS_FWD_NEW, v6); /* Update EXCLUDE (, Y-A) as D_FWD_NEW */ amt_lookup_act_srcs(tunnel, gnode, grec, AMT_OPS_SUB, AMT_FILTER_D_FWD, AMT_ACT_STATUS_D_FWD_NEW, v6); } } static void amt_mcast_is_ex_handler(struct amt_dev *amt, struct amt_tunnel_list *tunnel, struct amt_group_node *gnode, void *grec, void *zero_grec, bool v6) { if (gnode->filter_mode == MCAST_INCLUDE) { /* Router State Report Rec'd New Router State Actions * ------------ ------------ ---------------- ------- * INCLUDE (A) IS_EX (B) EXCLUDE (A*B,B-A) (B-A)=0 * Delete (A-B) * Group Timer=GMI */ /* EXCLUDE(A*B, ) */ amt_lookup_act_srcs(tunnel, gnode, grec, AMT_OPS_INT, AMT_FILTER_FWD, AMT_ACT_STATUS_FWD_NEW, v6); /* EXCLUDE(, B-A) */ amt_lookup_act_srcs(tunnel, gnode, grec, AMT_OPS_SUB_REV, AMT_FILTER_FWD, AMT_ACT_STATUS_D_FWD_NEW, v6); /* (B-A)=0 */ amt_lookup_act_srcs(tunnel, gnode, zero_grec, AMT_OPS_UNI, AMT_FILTER_D_FWD_NEW, AMT_ACT_GMI_ZERO, v6); /* Group Timer=GMI */ if (!mod_delayed_work(amt_wq, &gnode->group_timer, msecs_to_jiffies(amt_gmi(amt)))) dev_hold(amt->dev); gnode->filter_mode = MCAST_EXCLUDE; /* Delete (A-B) will be worked by amt_cleanup_srcs(). */ } else { /* Router State Report Rec'd New Router State Actions * ------------ ------------ ---------------- ------- * EXCLUDE (X,Y) IS_EX (A) EXCLUDE (A-Y,Y*A) (A-X-Y)=GMI * Delete (X-A) * Delete (Y-A) * Group Timer=GMI */ /* EXCLUDE (A-Y, ) */ amt_lookup_act_srcs(tunnel, gnode, grec, AMT_OPS_SUB_REV, AMT_FILTER_D_FWD, AMT_ACT_STATUS_FWD_NEW, v6); /* EXCLUDE (, Y*A ) */ amt_lookup_act_srcs(tunnel, gnode, grec, AMT_OPS_INT, AMT_FILTER_D_FWD, AMT_ACT_STATUS_D_FWD_NEW, v6); /* (A-X-Y)=GMI */ amt_lookup_act_srcs(tunnel, gnode, grec, AMT_OPS_SUB_REV, AMT_FILTER_BOTH_NEW, AMT_ACT_GMI, v6); /* Group Timer=GMI */ if (!mod_delayed_work(amt_wq, &gnode->group_timer, msecs_to_jiffies(amt_gmi(amt)))) dev_hold(amt->dev); /* Delete (X-A), (Y-A) will be worked by amt_cleanup_srcs(). */ } } static void amt_mcast_to_in_handler(struct amt_dev *amt, struct amt_tunnel_list *tunnel, struct amt_group_node *gnode, void *grec, void *zero_grec, bool v6) { if (gnode->filter_mode == MCAST_INCLUDE) { /* Router State Report Rec'd New Router State Actions * ------------ ------------ ---------------- ------- * INCLUDE (A) TO_IN (B) INCLUDE (A+B) (B)=GMI * Send Q(G,A-B) */ /* Update TO_IN (B) sources as FWD/NEW */ amt_lookup_act_srcs(tunnel, gnode, grec, AMT_OPS_UNI, AMT_FILTER_NONE_NEW, AMT_ACT_STATUS_FWD_NEW, v6); /* Update INCLUDE (A) sources as NEW */ amt_lookup_act_srcs(tunnel, gnode, grec, AMT_OPS_UNI, AMT_FILTER_FWD, AMT_ACT_STATUS_FWD_NEW, v6); /* (B)=GMI */ amt_lookup_act_srcs(tunnel, gnode, grec, AMT_OPS_INT, AMT_FILTER_FWD_NEW, AMT_ACT_GMI, v6); } else { /* Router State Report Rec'd New Router State Actions * ------------ ------------ ---------------- ------- * EXCLUDE (X,Y) TO_IN (A) EXCLUDE (X+A,Y-A) (A)=GMI * Send Q(G,X-A) * Send Q(G) */ /* Update TO_IN (A) sources as FWD/NEW */ amt_lookup_act_srcs(tunnel, gnode, grec, AMT_OPS_UNI, AMT_FILTER_NONE_NEW, AMT_ACT_STATUS_FWD_NEW, v6); /* Update EXCLUDE(X,) sources as FWD/NEW */ amt_lookup_act_srcs(tunnel, gnode, grec, AMT_OPS_UNI, AMT_FILTER_FWD, AMT_ACT_STATUS_FWD_NEW, v6); /* EXCLUDE (, Y-A) * (A) are already switched to FWD_NEW. * So, D_FWD/OLD -> D_FWD/NEW is okay. 
*/ amt_lookup_act_srcs(tunnel, gnode, zero_grec, AMT_OPS_UNI, AMT_FILTER_D_FWD, AMT_ACT_STATUS_D_FWD_NEW, v6); /* (A)=GMI * Only FWD_NEW will have (A) sources. */ amt_lookup_act_srcs(tunnel, gnode, grec, AMT_OPS_INT, AMT_FILTER_FWD_NEW, AMT_ACT_GMI, v6); } } static void amt_mcast_to_ex_handler(struct amt_dev *amt, struct amt_tunnel_list *tunnel, struct amt_group_node *gnode, void *grec, void *zero_grec, bool v6) { if (gnode->filter_mode == MCAST_INCLUDE) { /* Router State Report Rec'd New Router State Actions * ------------ ------------ ---------------- ------- * INCLUDE (A) TO_EX (B) EXCLUDE (A*B,B-A) (B-A)=0 * Delete (A-B) * Send Q(G,A*B) * Group Timer=GMI */ /* EXCLUDE (A*B, ) */ amt_lookup_act_srcs(tunnel, gnode, grec, AMT_OPS_INT, AMT_FILTER_FWD, AMT_ACT_STATUS_FWD_NEW, v6); /* EXCLUDE (, B-A) */ amt_lookup_act_srcs(tunnel, gnode, grec, AMT_OPS_SUB_REV, AMT_FILTER_FWD, AMT_ACT_STATUS_D_FWD_NEW, v6); /* (B-A)=0 */ amt_lookup_act_srcs(tunnel, gnode, zero_grec, AMT_OPS_UNI, AMT_FILTER_D_FWD_NEW, AMT_ACT_GMI_ZERO, v6); /* Group Timer=GMI */ if (!mod_delayed_work(amt_wq, &gnode->group_timer, msecs_to_jiffies(amt_gmi(amt)))) dev_hold(amt->dev); gnode->filter_mode = MCAST_EXCLUDE; /* Delete (A-B) will be worked by amt_cleanup_srcs(). */ } else { /* Router State Report Rec'd New Router State Actions * ------------ ------------ ---------------- ------- * EXCLUDE (X,Y) TO_EX (A) EXCLUDE (A-Y,Y*A) (A-X-Y)=Group Timer * Delete (X-A) * Delete (Y-A) * Send Q(G,A-Y) * Group Timer=GMI */ /* Update (A-X-Y) as NONE/OLD */ amt_lookup_act_srcs(tunnel, gnode, grec, AMT_OPS_SUB_REV, AMT_FILTER_BOTH, AMT_ACT_GT, v6); /* EXCLUDE (A-Y, ) */ amt_lookup_act_srcs(tunnel, gnode, grec, AMT_OPS_SUB_REV, AMT_FILTER_D_FWD, AMT_ACT_STATUS_FWD_NEW, v6); /* EXCLUDE (, Y*A) */ amt_lookup_act_srcs(tunnel, gnode, grec, AMT_OPS_INT, AMT_FILTER_D_FWD, AMT_ACT_STATUS_D_FWD_NEW, v6); /* Group Timer=GMI */ if (!mod_delayed_work(amt_wq, &gnode->group_timer, msecs_to_jiffies(amt_gmi(amt)))) dev_hold(amt->dev); /* Delete (X-A), (Y-A) will be worked by amt_cleanup_srcs(). */ } } static void amt_mcast_allow_handler(struct amt_dev *amt, struct amt_tunnel_list *tunnel, struct amt_group_node *gnode, void *grec, void *zero_grec, bool v6) { if (gnode->filter_mode == MCAST_INCLUDE) { /* Router State Report Rec'd New Router State Actions * ------------ ------------ ---------------- ------- * INCLUDE (A) ALLOW (B) INCLUDE (A+B) (B)=GMI */ /* INCLUDE (A+B) */ amt_lookup_act_srcs(tunnel, gnode, grec, AMT_OPS_UNI, AMT_FILTER_FWD, AMT_ACT_STATUS_FWD_NEW, v6); /* (B)=GMI */ amt_lookup_act_srcs(tunnel, gnode, grec, AMT_OPS_INT, AMT_FILTER_FWD_NEW, AMT_ACT_GMI, v6); } else { /* Router State Report Rec'd New Router State Actions * ------------ ------------ ---------------- ------- * EXCLUDE (X,Y) ALLOW (A) EXCLUDE (X+A,Y-A) (A)=GMI */ /* EXCLUDE (X+A, ) */ amt_lookup_act_srcs(tunnel, gnode, grec, AMT_OPS_UNI, AMT_FILTER_FWD, AMT_ACT_STATUS_FWD_NEW, v6); /* EXCLUDE (, Y-A) */ amt_lookup_act_srcs(tunnel, gnode, grec, AMT_OPS_SUB, AMT_FILTER_D_FWD, AMT_ACT_STATUS_D_FWD_NEW, v6); /* (A)=GMI * All (A) source are now FWD/NEW status. 
*/ amt_lookup_act_srcs(tunnel, gnode, grec, AMT_OPS_INT, AMT_FILTER_FWD_NEW, AMT_ACT_GMI, v6); } } static void amt_mcast_block_handler(struct amt_dev *amt, struct amt_tunnel_list *tunnel, struct amt_group_node *gnode, void *grec, void *zero_grec, bool v6) { if (gnode->filter_mode == MCAST_INCLUDE) { /* Router State Report Rec'd New Router State Actions * ------------ ------------ ---------------- ------- * INCLUDE (A) BLOCK (B) INCLUDE (A) Send Q(G,A*B) */ /* INCLUDE (A) */ amt_lookup_act_srcs(tunnel, gnode, zero_grec, AMT_OPS_UNI, AMT_FILTER_FWD, AMT_ACT_STATUS_FWD_NEW, v6); } else { /* Router State Report Rec'd New Router State Actions * ------------ ------------ ---------------- ------- * EXCLUDE (X,Y) BLOCK (A) EXCLUDE (X+(A-Y),Y) (A-X-Y)=Group Timer * Send Q(G,A-Y) */ /* (A-X-Y)=Group Timer */ amt_lookup_act_srcs(tunnel, gnode, grec, AMT_OPS_SUB_REV, AMT_FILTER_BOTH, AMT_ACT_GT, v6); /* EXCLUDE (X, ) */ amt_lookup_act_srcs(tunnel, gnode, grec, AMT_OPS_UNI, AMT_FILTER_FWD, AMT_ACT_STATUS_FWD_NEW, v6); /* EXCLUDE (X+(A-Y) */ amt_lookup_act_srcs(tunnel, gnode, grec, AMT_OPS_SUB_REV, AMT_FILTER_D_FWD, AMT_ACT_STATUS_FWD_NEW, v6); /* EXCLUDE (, Y) */ amt_lookup_act_srcs(tunnel, gnode, grec, AMT_OPS_UNI, AMT_FILTER_D_FWD, AMT_ACT_STATUS_D_FWD_NEW, v6); } } /* RFC 3376 * 7.3.2. In the Presence of Older Version Group Members * * When Group Compatibility Mode is IGMPv2, a router internally * translates the following IGMPv2 messages for that group to their * IGMPv3 equivalents: * * IGMPv2 Message IGMPv3 Equivalent * -------------- ----------------- * Report IS_EX( {} ) * Leave TO_IN( {} ) */ static void amt_igmpv2_report_handler(struct amt_dev *amt, struct sk_buff *skb, struct amt_tunnel_list *tunnel) { struct igmphdr *ih = igmp_hdr(skb); struct iphdr *iph = ip_hdr(skb); struct amt_group_node *gnode; union amt_addr group, host; memset(&group, 0, sizeof(union amt_addr)); group.ip4 = ih->group; memset(&host, 0, sizeof(union amt_addr)); host.ip4 = iph->saddr; gnode = amt_lookup_group(tunnel, &group, &host, false); if (!gnode) { gnode = amt_add_group(amt, tunnel, &group, &host, false); if (!IS_ERR(gnode)) { gnode->filter_mode = MCAST_EXCLUDE; if (!mod_delayed_work(amt_wq, &gnode->group_timer, msecs_to_jiffies(amt_gmi(amt)))) dev_hold(amt->dev); } } } /* RFC 3376 * 7.3.2. 
In the Presence of Older Version Group Members * * When Group Compatibility Mode is IGMPv2, a router internally * translates the following IGMPv2 messages for that group to their * IGMPv3 equivalents: * * IGMPv2 Message IGMPv3 Equivalent * -------------- ----------------- * Report IS_EX( {} ) * Leave TO_IN( {} ) */ static void amt_igmpv2_leave_handler(struct amt_dev *amt, struct sk_buff *skb, struct amt_tunnel_list *tunnel) { struct igmphdr *ih = igmp_hdr(skb); struct iphdr *iph = ip_hdr(skb); struct amt_group_node *gnode; union amt_addr group, host; memset(&group, 0, sizeof(union amt_addr)); group.ip4 = ih->group; memset(&host, 0, sizeof(union amt_addr)); host.ip4 = iph->saddr; gnode = amt_lookup_group(tunnel, &group, &host, false); if (gnode) amt_del_group(amt, gnode); } static void amt_igmpv3_report_handler(struct amt_dev *amt, struct sk_buff *skb, struct amt_tunnel_list *tunnel) { struct igmpv3_report *ihrv3 = igmpv3_report_hdr(skb); int len = skb_transport_offset(skb) + sizeof(*ihrv3); void *zero_grec = (void *)&igmpv3_zero_grec; struct iphdr *iph = ip_hdr(skb); struct amt_group_node *gnode; union amt_addr group, host; struct igmpv3_grec *grec; u16 nsrcs; int i; for (i = 0; i < ntohs(ihrv3->ngrec); i++) { len += sizeof(*grec); if (!ip_mc_may_pull(skb, len)) break; grec = (void *)(skb->data + len - sizeof(*grec)); nsrcs = ntohs(grec->grec_nsrcs); len += nsrcs * sizeof(__be32); if (!ip_mc_may_pull(skb, len)) break; memset(&group, 0, sizeof(union amt_addr)); group.ip4 = grec->grec_mca; memset(&host, 0, sizeof(union amt_addr)); host.ip4 = iph->saddr; gnode = amt_lookup_group(tunnel, &group, &host, false); if (!gnode) { gnode = amt_add_group(amt, tunnel, &group, &host, false); if (IS_ERR(gnode)) continue; } amt_add_srcs(amt, tunnel, gnode, grec, false); switch (grec->grec_type) { case IGMPV3_MODE_IS_INCLUDE: amt_mcast_is_in_handler(amt, tunnel, gnode, grec, zero_grec, false); break; case IGMPV3_MODE_IS_EXCLUDE: amt_mcast_is_ex_handler(amt, tunnel, gnode, grec, zero_grec, false); break; case IGMPV3_CHANGE_TO_INCLUDE: amt_mcast_to_in_handler(amt, tunnel, gnode, grec, zero_grec, false); break; case IGMPV3_CHANGE_TO_EXCLUDE: amt_mcast_to_ex_handler(amt, tunnel, gnode, grec, zero_grec, false); break; case IGMPV3_ALLOW_NEW_SOURCES: amt_mcast_allow_handler(amt, tunnel, gnode, grec, zero_grec, false); break; case IGMPV3_BLOCK_OLD_SOURCES: amt_mcast_block_handler(amt, tunnel, gnode, grec, zero_grec, false); break; default: break; } amt_cleanup_srcs(amt, tunnel, gnode); } } /* caller held tunnel->lock */ static void amt_igmp_report_handler(struct amt_dev *amt, struct sk_buff *skb, struct amt_tunnel_list *tunnel) { struct igmphdr *ih = igmp_hdr(skb); switch (ih->type) { case IGMPV3_HOST_MEMBERSHIP_REPORT: amt_igmpv3_report_handler(amt, skb, tunnel); break; case IGMPV2_HOST_MEMBERSHIP_REPORT: amt_igmpv2_report_handler(amt, skb, tunnel); break; case IGMP_HOST_LEAVE_MESSAGE: amt_igmpv2_leave_handler(amt, skb, tunnel); break; default: break; } } #if IS_ENABLED(CONFIG_IPV6) /* RFC 3810 * 8.3.2. In the Presence of MLDv1 Multicast Address Listeners * * When Multicast Address Compatibility Mode is MLDv2, a router acts * using the MLDv2 protocol for that multicast address. 
When Multicast * Address Compatibility Mode is MLDv1, a router internally translates * the following MLDv1 messages for that multicast address to their * MLDv2 equivalents: * * MLDv1 Message MLDv2 Equivalent * -------------- ----------------- * Report IS_EX( {} ) * Done TO_IN( {} ) */ static void amt_mldv1_report_handler(struct amt_dev *amt, struct sk_buff *skb, struct amt_tunnel_list *tunnel) { struct mld_msg *mld = (struct mld_msg *)icmp6_hdr(skb); struct ipv6hdr *ip6h = ipv6_hdr(skb); struct amt_group_node *gnode; union amt_addr group, host; memcpy(&group.ip6, &mld->mld_mca, sizeof(struct in6_addr)); memcpy(&host.ip6, &ip6h->saddr, sizeof(struct in6_addr)); gnode = amt_lookup_group(tunnel, &group, &host, true); if (!gnode) { gnode = amt_add_group(amt, tunnel, &group, &host, true); if (!IS_ERR(gnode)) { gnode->filter_mode = MCAST_EXCLUDE; if (!mod_delayed_work(amt_wq, &gnode->group_timer, msecs_to_jiffies(amt_gmi(amt)))) dev_hold(amt->dev); } } } /* RFC 3810 * 8.3.2. In the Presence of MLDv1 Multicast Address Listeners * * When Multicast Address Compatibility Mode is MLDv2, a router acts * using the MLDv2 protocol for that multicast address. When Multicast * Address Compatibility Mode is MLDv1, a router internally translates * the following MLDv1 messages for that multicast address to their * MLDv2 equivalents: * * MLDv1 Message MLDv2 Equivalent * -------------- ----------------- * Report IS_EX( {} ) * Done TO_IN( {} ) */ static void amt_mldv1_leave_handler(struct amt_dev *amt, struct sk_buff *skb, struct amt_tunnel_list *tunnel) { struct mld_msg *mld = (struct mld_msg *)icmp6_hdr(skb); struct iphdr *iph = ip_hdr(skb); struct amt_group_node *gnode; union amt_addr group, host; memcpy(&group.ip6, &mld->mld_mca, sizeof(struct in6_addr)); memset(&host, 0, sizeof(union amt_addr)); host.ip4 = iph->saddr; gnode = amt_lookup_group(tunnel, &group, &host, true); if (gnode) { amt_del_group(amt, gnode); return; } } static void amt_mldv2_report_handler(struct amt_dev *amt, struct sk_buff *skb, struct amt_tunnel_list *tunnel) { struct mld2_report *mld2r = (struct mld2_report *)icmp6_hdr(skb); int len = skb_transport_offset(skb) + sizeof(*mld2r); void *zero_grec = (void *)&mldv2_zero_grec; struct ipv6hdr *ip6h = ipv6_hdr(skb); struct amt_group_node *gnode; union amt_addr group, host; struct mld2_grec *grec; u16 nsrcs; int i; for (i = 0; i < ntohs(mld2r->mld2r_ngrec); i++) { len += sizeof(*grec); if (!ipv6_mc_may_pull(skb, len)) break; grec = (void *)(skb->data + len - sizeof(*grec)); nsrcs = ntohs(grec->grec_nsrcs); len += nsrcs * sizeof(struct in6_addr); if (!ipv6_mc_may_pull(skb, len)) break; memset(&group, 0, sizeof(union amt_addr)); group.ip6 = grec->grec_mca; memset(&host, 0, sizeof(union amt_addr)); host.ip6 = ip6h->saddr; gnode = amt_lookup_group(tunnel, &group, &host, true); if (!gnode) { gnode = amt_add_group(amt, tunnel, &group, &host, ETH_P_IPV6); if (IS_ERR(gnode)) continue; } amt_add_srcs(amt, tunnel, gnode, grec, true); switch (grec->grec_type) { case MLD2_MODE_IS_INCLUDE: amt_mcast_is_in_handler(amt, tunnel, gnode, grec, zero_grec, true); break; case MLD2_MODE_IS_EXCLUDE: amt_mcast_is_ex_handler(amt, tunnel, gnode, grec, zero_grec, true); break; case MLD2_CHANGE_TO_INCLUDE: amt_mcast_to_in_handler(amt, tunnel, gnode, grec, zero_grec, true); break; case MLD2_CHANGE_TO_EXCLUDE: amt_mcast_to_ex_handler(amt, tunnel, gnode, grec, zero_grec, true); break; case MLD2_ALLOW_NEW_SOURCES: amt_mcast_allow_handler(amt, tunnel, gnode, grec, zero_grec, true); break; case MLD2_BLOCK_OLD_SOURCES: 
amt_mcast_block_handler(amt, tunnel, gnode, grec, zero_grec, true); break; default: break; } amt_cleanup_srcs(amt, tunnel, gnode); } } /* caller held tunnel->lock */ static void amt_mld_report_handler(struct amt_dev *amt, struct sk_buff *skb, struct amt_tunnel_list *tunnel) { struct mld_msg *mld = (struct mld_msg *)icmp6_hdr(skb); switch (mld->mld_type) { case ICMPV6_MGM_REPORT: amt_mldv1_report_handler(amt, skb, tunnel); break; case ICMPV6_MLD2_REPORT: amt_mldv2_report_handler(amt, skb, tunnel); break; case ICMPV6_MGM_REDUCTION: amt_mldv1_leave_handler(amt, skb, tunnel); break; default: break; } } #endif static bool amt_advertisement_handler(struct amt_dev *amt, struct sk_buff *skb) { struct amt_header_advertisement *amta; int hdr_size; hdr_size = sizeof(*amta) + sizeof(struct udphdr); if (!pskb_may_pull(skb, hdr_size)) return true; amta = (struct amt_header_advertisement *)(udp_hdr(skb) + 1); if (!amta->ip4) return true; if (amta->reserved || amta->version) return true; if (ipv4_is_loopback(amta->ip4) || ipv4_is_multicast(amta->ip4) || ipv4_is_zeronet(amta->ip4)) return true; if (amt->status != AMT_STATUS_SENT_DISCOVERY || amt->nonce != amta->nonce) return true; amt->remote_ip = amta->ip4; netdev_dbg(amt->dev, "advertised remote ip = %pI4\n", &amt->remote_ip); mod_delayed_work(amt_wq, &amt->req_wq, 0); amt_update_gw_status(amt, AMT_STATUS_RECEIVED_ADVERTISEMENT, true); return false; } static bool amt_multicast_data_handler(struct amt_dev *amt, struct sk_buff *skb) { struct amt_header_mcast_data *amtmd; int hdr_size, len, err; struct ethhdr *eth; struct iphdr *iph; if (READ_ONCE(amt->status) != AMT_STATUS_SENT_UPDATE) return true; hdr_size = sizeof(*amtmd) + sizeof(struct udphdr); if (!pskb_may_pull(skb, hdr_size)) return true; amtmd = (struct amt_header_mcast_data *)(udp_hdr(skb) + 1); if (amtmd->reserved || amtmd->version) return true; if (iptunnel_pull_header(skb, hdr_size, htons(ETH_P_IP), false)) return true; skb_reset_network_header(skb); skb_push(skb, sizeof(*eth)); skb_reset_mac_header(skb); skb_pull(skb, sizeof(*eth)); eth = eth_hdr(skb); if (!pskb_may_pull(skb, sizeof(*iph))) return true; iph = ip_hdr(skb); if (iph->version == 4) { if (!ipv4_is_multicast(iph->daddr)) return true; skb->protocol = htons(ETH_P_IP); eth->h_proto = htons(ETH_P_IP); ip_eth_mc_map(iph->daddr, eth->h_dest); #if IS_ENABLED(CONFIG_IPV6) } else if (iph->version == 6) { struct ipv6hdr *ip6h; if (!pskb_may_pull(skb, sizeof(*ip6h))) return true; ip6h = ipv6_hdr(skb); if (!ipv6_addr_is_multicast(&ip6h->daddr)) return true; skb->protocol = htons(ETH_P_IPV6); eth->h_proto = htons(ETH_P_IPV6); ipv6_eth_mc_map(&ip6h->daddr, eth->h_dest); #endif } else { return true; } skb->pkt_type = PACKET_MULTICAST; skb->ip_summed = CHECKSUM_NONE; len = skb->len; err = gro_cells_receive(&amt->gro_cells, skb); if (likely(err == NET_RX_SUCCESS)) dev_sw_netstats_rx_add(amt->dev, len); else amt->dev->stats.rx_dropped++; return false; } static bool amt_membership_query_handler(struct amt_dev *amt, struct sk_buff *skb) { struct amt_header_membership_query *amtmq; struct igmpv3_query *ihv3; struct ethhdr *eth, *oeth; struct iphdr *iph; int hdr_size, len; hdr_size = sizeof(*amtmq) + sizeof(struct udphdr); if (!pskb_may_pull(skb, hdr_size)) return true; amtmq = (struct amt_header_membership_query *)(udp_hdr(skb) + 1); if (amtmq->reserved || amtmq->version) return true; if (amtmq->nonce != amt->nonce) return true; hdr_size -= sizeof(*eth); if (iptunnel_pull_header(skb, hdr_size, htons(ETH_P_TEB), false)) return true; oeth = eth_hdr(skb); 
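	/* The outer Ethernet header saved in 'oeth' above supplies the
	 * source address for the decapsulated frame (see the
	 * ether_addr_copy() further below).
	 */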
skb_reset_mac_header(skb); skb_pull(skb, sizeof(*eth)); skb_reset_network_header(skb); eth = eth_hdr(skb); if (!pskb_may_pull(skb, sizeof(*iph))) return true; iph = ip_hdr(skb); if (iph->version == 4) { if (READ_ONCE(amt->ready4)) return true; if (!pskb_may_pull(skb, sizeof(*iph) + AMT_IPHDR_OPTS + sizeof(*ihv3))) return true; if (!ipv4_is_multicast(iph->daddr)) return true; ihv3 = skb_pull(skb, sizeof(*iph) + AMT_IPHDR_OPTS); skb_reset_transport_header(skb); skb_push(skb, sizeof(*iph) + AMT_IPHDR_OPTS); WRITE_ONCE(amt->ready4, true); amt->mac = amtmq->response_mac; amt->req_cnt = 0; amt->qi = ihv3->qqic; skb->protocol = htons(ETH_P_IP); eth->h_proto = htons(ETH_P_IP); ip_eth_mc_map(iph->daddr, eth->h_dest); #if IS_ENABLED(CONFIG_IPV6) } else if (iph->version == 6) { struct mld2_query *mld2q; struct ipv6hdr *ip6h; if (READ_ONCE(amt->ready6)) return true; if (!pskb_may_pull(skb, sizeof(*ip6h) + AMT_IP6HDR_OPTS + sizeof(*mld2q))) return true; ip6h = ipv6_hdr(skb); if (!ipv6_addr_is_multicast(&ip6h->daddr)) return true; mld2q = skb_pull(skb, sizeof(*ip6h) + AMT_IP6HDR_OPTS); skb_reset_transport_header(skb); skb_push(skb, sizeof(*ip6h) + AMT_IP6HDR_OPTS); WRITE_ONCE(amt->ready6, true); amt->mac = amtmq->response_mac; amt->req_cnt = 0; amt->qi = mld2q->mld2q_qqic; skb->protocol = htons(ETH_P_IPV6); eth->h_proto = htons(ETH_P_IPV6); ipv6_eth_mc_map(&ip6h->daddr, eth->h_dest); #endif } else { return true; } ether_addr_copy(eth->h_source, oeth->h_source); skb->pkt_type = PACKET_MULTICAST; skb->ip_summed = CHECKSUM_NONE; len = skb->len; local_bh_disable(); if (__netif_rx(skb) == NET_RX_SUCCESS) { amt_update_gw_status(amt, AMT_STATUS_RECEIVED_QUERY, true); dev_sw_netstats_rx_add(amt->dev, len); } else { amt->dev->stats.rx_dropped++; } local_bh_enable(); return false; } static bool amt_update_handler(struct amt_dev *amt, struct sk_buff *skb) { struct amt_header_membership_update *amtmu; struct amt_tunnel_list *tunnel; struct ethhdr *eth; struct iphdr *iph; int len, hdr_size; iph = ip_hdr(skb); hdr_size = sizeof(*amtmu) + sizeof(struct udphdr); if (!pskb_may_pull(skb, hdr_size)) return true; amtmu = (struct amt_header_membership_update *)(udp_hdr(skb) + 1); if (amtmu->reserved || amtmu->version) return true; if (iptunnel_pull_header(skb, hdr_size, skb->protocol, false)) return true; skb_reset_network_header(skb); list_for_each_entry_rcu(tunnel, &amt->tunnel_list, list) { if (tunnel->ip4 == iph->saddr) { if ((amtmu->nonce == tunnel->nonce && amtmu->response_mac == tunnel->mac)) { mod_delayed_work(amt_wq, &tunnel->gc_wq, msecs_to_jiffies(amt_gmi(amt)) * 3); goto report; } else { netdev_dbg(amt->dev, "Invalid MAC\n"); return true; } } } return true; report: if (!pskb_may_pull(skb, sizeof(*iph))) return true; iph = ip_hdr(skb); if (iph->version == 4) { if (ip_mc_check_igmp(skb)) { netdev_dbg(amt->dev, "Invalid IGMP\n"); return true; } spin_lock_bh(&tunnel->lock); amt_igmp_report_handler(amt, skb, tunnel); spin_unlock_bh(&tunnel->lock); skb_push(skb, sizeof(struct ethhdr)); skb_reset_mac_header(skb); eth = eth_hdr(skb); skb->protocol = htons(ETH_P_IP); eth->h_proto = htons(ETH_P_IP); ip_eth_mc_map(iph->daddr, eth->h_dest); #if IS_ENABLED(CONFIG_IPV6) } else if (iph->version == 6) { struct ipv6hdr *ip6h = ipv6_hdr(skb); if (ipv6_mc_check_mld(skb)) { netdev_dbg(amt->dev, "Invalid MLD\n"); return true; } spin_lock_bh(&tunnel->lock); amt_mld_report_handler(amt, skb, tunnel); spin_unlock_bh(&tunnel->lock); skb_push(skb, sizeof(struct ethhdr)); skb_reset_mac_header(skb); eth = eth_hdr(skb); skb->protocol = 
htons(ETH_P_IPV6); eth->h_proto = htons(ETH_P_IPV6); ipv6_eth_mc_map(&ip6h->daddr, eth->h_dest); #endif } else { netdev_dbg(amt->dev, "Unsupported Protocol\n"); return true; } skb_pull(skb, sizeof(struct ethhdr)); skb->pkt_type = PACKET_MULTICAST; skb->ip_summed = CHECKSUM_NONE; len = skb->len; if (__netif_rx(skb) == NET_RX_SUCCESS) { amt_update_relay_status(tunnel, AMT_STATUS_RECEIVED_UPDATE, true); dev_sw_netstats_rx_add(amt->dev, len); } else { amt->dev->stats.rx_dropped++; } return false; } static void amt_send_advertisement(struct amt_dev *amt, __be32 nonce, __be32 daddr, __be16 dport) { struct amt_header_advertisement *amta; int hlen, tlen, offset; struct socket *sock; struct udphdr *udph; struct sk_buff *skb; struct iphdr *iph; struct rtable *rt; struct flowi4 fl4; u32 len; int err; rcu_read_lock(); sock = rcu_dereference(amt->sock); if (!sock) goto out; if (!netif_running(amt->stream_dev) || !netif_running(amt->dev)) goto out; rt = ip_route_output_ports(amt->net, &fl4, sock->sk, daddr, amt->local_ip, dport, amt->relay_port, IPPROTO_UDP, 0, amt->stream_dev->ifindex); if (IS_ERR(rt)) { amt->dev->stats.tx_errors++; goto out; } hlen = LL_RESERVED_SPACE(amt->dev); tlen = amt->dev->needed_tailroom; len = hlen + tlen + sizeof(*iph) + sizeof(*udph) + sizeof(*amta); skb = netdev_alloc_skb_ip_align(amt->dev, len); if (!skb) { ip_rt_put(rt); amt->dev->stats.tx_errors++; goto out; } skb->priority = TC_PRIO_CONTROL; skb_dst_set(skb, &rt->dst); len = sizeof(*iph) + sizeof(*udph) + sizeof(*amta); skb_reset_network_header(skb); skb_put(skb, len); amta = skb_pull(skb, sizeof(*iph) + sizeof(*udph)); amta->version = 0; amta->type = AMT_MSG_ADVERTISEMENT; amta->reserved = 0; amta->nonce = nonce; amta->ip4 = amt->local_ip; skb_push(skb, sizeof(*udph)); skb_reset_transport_header(skb); udph = udp_hdr(skb); udph->source = amt->relay_port; udph->dest = dport; udph->len = htons(sizeof(*amta) + sizeof(*udph)); udph->check = 0; offset = skb_transport_offset(skb); skb->csum = skb_checksum(skb, offset, skb->len - offset, 0); udph->check = csum_tcpudp_magic(amt->local_ip, daddr, sizeof(*udph) + sizeof(*amta), IPPROTO_UDP, skb->csum); skb_push(skb, sizeof(*iph)); iph = ip_hdr(skb); iph->version = 4; iph->ihl = (sizeof(struct iphdr)) >> 2; iph->tos = AMT_TOS; iph->frag_off = 0; iph->ttl = ip4_dst_hoplimit(&rt->dst); iph->daddr = daddr; iph->saddr = amt->local_ip; iph->protocol = IPPROTO_UDP; iph->tot_len = htons(len); skb->ip_summed = CHECKSUM_NONE; ip_select_ident(amt->net, skb, NULL); ip_send_check(iph); err = ip_local_out(amt->net, sock->sk, skb); if (unlikely(net_xmit_eval(err))) amt->dev->stats.tx_errors++; out: rcu_read_unlock(); } static bool amt_discovery_handler(struct amt_dev *amt, struct sk_buff *skb) { struct amt_header_discovery *amtd; struct udphdr *udph; struct iphdr *iph; if (!pskb_may_pull(skb, sizeof(*udph) + sizeof(*amtd))) return true; iph = ip_hdr(skb); udph = udp_hdr(skb); amtd = (struct amt_header_discovery *)(udp_hdr(skb) + 1); if (amtd->reserved || amtd->version) return true; amt_send_advertisement(amt, amtd->nonce, iph->saddr, udph->source); return false; } static bool amt_request_handler(struct amt_dev *amt, struct sk_buff *skb) { struct amt_header_request *amtrh; struct amt_tunnel_list *tunnel; unsigned long long key; struct udphdr *udph; struct iphdr *iph; u64 mac; int i; if (!pskb_may_pull(skb, sizeof(*udph) + sizeof(*amtrh))) return true; iph = ip_hdr(skb); udph = udp_hdr(skb); amtrh = (struct amt_header_request *)(udp_hdr(skb) + 1); if (amtrh->reserved1 || amtrh->reserved2 || 
amtrh->version) return true; list_for_each_entry_rcu(tunnel, &amt->tunnel_list, list) if (tunnel->ip4 == iph->saddr) goto send; spin_lock_bh(&amt->lock); if (amt->nr_tunnels >= amt->max_tunnels) { spin_unlock_bh(&amt->lock); icmp_ndo_send(skb, ICMP_DEST_UNREACH, ICMP_HOST_UNREACH, 0); return true; } tunnel = kzalloc(sizeof(*tunnel) + (sizeof(struct hlist_head) * amt->hash_buckets), GFP_ATOMIC); if (!tunnel) { spin_unlock_bh(&amt->lock); return true; } tunnel->source_port = udph->source; tunnel->ip4 = iph->saddr; memcpy(&key, &tunnel->key, sizeof(unsigned long long)); tunnel->amt = amt; spin_lock_init(&tunnel->lock); for (i = 0; i < amt->hash_buckets; i++) INIT_HLIST_HEAD(&tunnel->groups[i]); INIT_DELAYED_WORK(&tunnel->gc_wq, amt_tunnel_expire); list_add_tail_rcu(&tunnel->list, &amt->tunnel_list); tunnel->key = amt->key; __amt_update_relay_status(tunnel, AMT_STATUS_RECEIVED_REQUEST, true); amt->nr_tunnels++; mod_delayed_work(amt_wq, &tunnel->gc_wq, msecs_to_jiffies(amt_gmi(amt))); spin_unlock_bh(&amt->lock); send: tunnel->nonce = amtrh->nonce; mac = siphash_3u32((__force u32)tunnel->ip4, (__force u32)tunnel->source_port, (__force u32)tunnel->nonce, &tunnel->key); tunnel->mac = mac >> 16; if (!netif_running(amt->dev) || !netif_running(amt->stream_dev)) return true; if (!amtrh->p) amt_send_igmp_gq(amt, tunnel); else amt_send_mld_gq(amt, tunnel); return false; } static void amt_gw_rcv(struct amt_dev *amt, struct sk_buff *skb) { int type = amt_parse_type(skb); int err = 1; if (type == -1) goto drop; if (amt->mode == AMT_MODE_GATEWAY) { switch (type) { case AMT_MSG_ADVERTISEMENT: err = amt_advertisement_handler(amt, skb); break; case AMT_MSG_MEMBERSHIP_QUERY: err = amt_membership_query_handler(amt, skb); if (!err) return; break; default: netdev_dbg(amt->dev, "Invalid type of Gateway\n"); break; } } drop: if (err) { amt->dev->stats.rx_dropped++; kfree_skb(skb); } else { consume_skb(skb); } } static int amt_rcv(struct sock *sk, struct sk_buff *skb) { struct amt_dev *amt; struct iphdr *iph; int type; bool err; rcu_read_lock_bh(); amt = rcu_dereference_sk_user_data(sk); if (!amt) { err = true; kfree_skb(skb); goto out; } skb->dev = amt->dev; iph = ip_hdr(skb); type = amt_parse_type(skb); if (type == -1) { err = true; goto drop; } if (amt->mode == AMT_MODE_GATEWAY) { switch (type) { case AMT_MSG_ADVERTISEMENT: if (iph->saddr != amt->discovery_ip) { netdev_dbg(amt->dev, "Invalid Relay IP\n"); err = true; goto drop; } if (amt_queue_event(amt, AMT_EVENT_RECEIVE, skb)) { netdev_dbg(amt->dev, "AMT Event queue full\n"); err = true; goto drop; } goto out; case AMT_MSG_MULTICAST_DATA: if (iph->saddr != amt->remote_ip) { netdev_dbg(amt->dev, "Invalid Relay IP\n"); err = true; goto drop; } err = amt_multicast_data_handler(amt, skb); if (err) goto drop; else goto out; case AMT_MSG_MEMBERSHIP_QUERY: if (iph->saddr != amt->remote_ip) { netdev_dbg(amt->dev, "Invalid Relay IP\n"); err = true; goto drop; } if (amt_queue_event(amt, AMT_EVENT_RECEIVE, skb)) { netdev_dbg(amt->dev, "AMT Event queue full\n"); err = true; goto drop; } goto out; default: err = true; netdev_dbg(amt->dev, "Invalid type of Gateway\n"); break; } } else { switch (type) { case AMT_MSG_DISCOVERY: err = amt_discovery_handler(amt, skb); break; case AMT_MSG_REQUEST: err = amt_request_handler(amt, skb); break; case AMT_MSG_MEMBERSHIP_UPDATE: err = amt_update_handler(amt, skb); if (err) goto drop; else goto out; default: err = true; netdev_dbg(amt->dev, "Invalid type of relay\n"); break; } } drop: if (err) { amt->dev->stats.rx_dropped++; 
kfree_skb(skb); } else { consume_skb(skb); } out: rcu_read_unlock_bh(); return 0; } static void amt_event_work(struct work_struct *work) { struct amt_dev *amt = container_of(work, struct amt_dev, event_wq); struct sk_buff *skb; u8 event; int i; for (i = 0; i < AMT_MAX_EVENTS; i++) { spin_lock_bh(&amt->lock); if (amt->nr_events == 0) { spin_unlock_bh(&amt->lock); return; } event = amt->events[amt->event_idx].event; skb = amt->events[amt->event_idx].skb; amt->events[amt->event_idx].event = AMT_EVENT_NONE; amt->events[amt->event_idx].skb = NULL; amt->nr_events--; amt->event_idx++; amt->event_idx %= AMT_MAX_EVENTS; spin_unlock_bh(&amt->lock); switch (event) { case AMT_EVENT_RECEIVE: amt_gw_rcv(amt, skb); break; case AMT_EVENT_SEND_DISCOVERY: amt_event_send_discovery(amt); break; case AMT_EVENT_SEND_REQUEST: amt_event_send_request(amt); break; default: kfree_skb(skb); break; } } } static int amt_err_lookup(struct sock *sk, struct sk_buff *skb) { struct amt_dev *amt; int type; rcu_read_lock_bh(); amt = rcu_dereference_sk_user_data(sk); if (!amt) goto out; if (amt->mode != AMT_MODE_GATEWAY) goto drop; type = amt_parse_type(skb); if (type == -1) goto drop; netdev_dbg(amt->dev, "Received IGMP Unreachable of %s\n", type_str[type]); switch (type) { case AMT_MSG_DISCOVERY: break; case AMT_MSG_REQUEST: case AMT_MSG_MEMBERSHIP_UPDATE: if (READ_ONCE(amt->status) >= AMT_STATUS_RECEIVED_ADVERTISEMENT) mod_delayed_work(amt_wq, &amt->req_wq, 0); break; default: goto drop; } out: rcu_read_unlock_bh(); return 0; drop: rcu_read_unlock_bh(); amt->dev->stats.rx_dropped++; return 0; } static struct socket *amt_create_sock(struct net *net, __be16 port) { struct udp_port_cfg udp_conf; struct socket *sock; int err; memset(&udp_conf, 0, sizeof(udp_conf)); udp_conf.family = AF_INET; udp_conf.local_ip.s_addr = htonl(INADDR_ANY); udp_conf.local_udp_port = port; err = udp_sock_create(net, &udp_conf, &sock); if (err < 0) return ERR_PTR(err); return sock; } static int amt_socket_create(struct amt_dev *amt) { struct udp_tunnel_sock_cfg tunnel_cfg; struct socket *sock; sock = amt_create_sock(amt->net, amt->relay_port); if (IS_ERR(sock)) return PTR_ERR(sock); /* Mark socket as an encapsulation socket */ memset(&tunnel_cfg, 0, sizeof(tunnel_cfg)); tunnel_cfg.sk_user_data = amt; tunnel_cfg.encap_type = 1; tunnel_cfg.encap_rcv = amt_rcv; tunnel_cfg.encap_err_lookup = amt_err_lookup; tunnel_cfg.encap_destroy = NULL; setup_udp_tunnel_sock(amt->net, sock, &tunnel_cfg); rcu_assign_pointer(amt->sock, sock); return 0; } static int amt_dev_open(struct net_device *dev) { struct amt_dev *amt = netdev_priv(dev); int err; amt->ready4 = false; amt->ready6 = false; amt->event_idx = 0; amt->nr_events = 0; err = amt_socket_create(amt); if (err) return err; amt->req_cnt = 0; amt->remote_ip = 0; amt->nonce = 0; get_random_bytes(&amt->key, sizeof(siphash_key_t)); amt->status = AMT_STATUS_INIT; if (amt->mode == AMT_MODE_GATEWAY) { mod_delayed_work(amt_wq, &amt->discovery_wq, 0); mod_delayed_work(amt_wq, &amt->req_wq, 0); } else if (amt->mode == AMT_MODE_RELAY) { mod_delayed_work(amt_wq, &amt->secret_wq, msecs_to_jiffies(AMT_SECRET_TIMEOUT)); } return err; } static int amt_dev_stop(struct net_device *dev) { struct amt_dev *amt = netdev_priv(dev); struct amt_tunnel_list *tunnel, *tmp; struct socket *sock; struct sk_buff *skb; int i; cancel_delayed_work_sync(&amt->req_wq); cancel_delayed_work_sync(&amt->discovery_wq); cancel_delayed_work_sync(&amt->secret_wq); /* shutdown */ sock = rtnl_dereference(amt->sock); RCU_INIT_POINTER(amt->sock, NULL); 
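	/* Wait for in-flight RCU readers that may still hold the old
	 * amt->sock pointer before the socket is released below.
	 */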
synchronize_net(); if (sock) udp_tunnel_sock_release(sock); cancel_work_sync(&amt->event_wq); for (i = 0; i < AMT_MAX_EVENTS; i++) { skb = amt->events[i].skb; kfree_skb(skb); amt->events[i].event = AMT_EVENT_NONE; amt->events[i].skb = NULL; } amt->ready4 = false; amt->ready6 = false; amt->req_cnt = 0; amt->remote_ip = 0; list_for_each_entry_safe(tunnel, tmp, &amt->tunnel_list, list) { list_del_rcu(&tunnel->list); amt->nr_tunnels--; cancel_delayed_work_sync(&tunnel->gc_wq); amt_clear_groups(tunnel); kfree_rcu(tunnel, rcu); } return 0; } static const struct device_type amt_type = { .name = "amt", }; static int amt_dev_init(struct net_device *dev) { struct amt_dev *amt = netdev_priv(dev); int err; amt->dev = dev; dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats); if (!dev->tstats) return -ENOMEM; err = gro_cells_init(&amt->gro_cells, dev); if (err) { free_percpu(dev->tstats); return err; } return 0; } static void amt_dev_uninit(struct net_device *dev) { struct amt_dev *amt = netdev_priv(dev); gro_cells_destroy(&amt->gro_cells); free_percpu(dev->tstats); } static const struct net_device_ops amt_netdev_ops = { .ndo_init = amt_dev_init, .ndo_uninit = amt_dev_uninit, .ndo_open = amt_dev_open, .ndo_stop = amt_dev_stop, .ndo_start_xmit = amt_dev_xmit, .ndo_get_stats64 = dev_get_tstats64, }; static void amt_link_setup(struct net_device *dev) { dev->netdev_ops = &amt_netdev_ops; dev->needs_free_netdev = true; SET_NETDEV_DEVTYPE(dev, &amt_type); dev->min_mtu = ETH_MIN_MTU; dev->max_mtu = ETH_MAX_MTU; dev->type = ARPHRD_NONE; dev->flags = IFF_POINTOPOINT | IFF_NOARP | IFF_MULTICAST; dev->hard_header_len = 0; dev->addr_len = 0; dev->priv_flags |= IFF_NO_QUEUE; dev->features |= NETIF_F_LLTX; dev->features |= NETIF_F_GSO_SOFTWARE; dev->features |= NETIF_F_NETNS_LOCAL; dev->hw_features |= NETIF_F_SG | NETIF_F_HW_CSUM; dev->hw_features |= NETIF_F_FRAGLIST | NETIF_F_RXCSUM; dev->hw_features |= NETIF_F_GSO_SOFTWARE; eth_hw_addr_random(dev); eth_zero_addr(dev->broadcast); ether_setup(dev); } static const struct nla_policy amt_policy[IFLA_AMT_MAX + 1] = { [IFLA_AMT_MODE] = { .type = NLA_U32 }, [IFLA_AMT_RELAY_PORT] = { .type = NLA_U16 }, [IFLA_AMT_GATEWAY_PORT] = { .type = NLA_U16 }, [IFLA_AMT_LINK] = { .type = NLA_U32 }, [IFLA_AMT_LOCAL_IP] = { .len = sizeof_field(struct iphdr, daddr) }, [IFLA_AMT_REMOTE_IP] = { .len = sizeof_field(struct iphdr, daddr) }, [IFLA_AMT_DISCOVERY_IP] = { .len = sizeof_field(struct iphdr, daddr) }, [IFLA_AMT_MAX_TUNNELS] = { .type = NLA_U32 }, }; static int amt_validate(struct nlattr *tb[], struct nlattr *data[], struct netlink_ext_ack *extack) { if (!data) return -EINVAL; if (!data[IFLA_AMT_LINK]) { NL_SET_ERR_MSG_ATTR(extack, data[IFLA_AMT_LINK], "Link attribute is required"); return -EINVAL; } if (!data[IFLA_AMT_MODE]) { NL_SET_ERR_MSG_ATTR(extack, data[IFLA_AMT_MODE], "Mode attribute is required"); return -EINVAL; } if (nla_get_u32(data[IFLA_AMT_MODE]) > AMT_MODE_MAX) { NL_SET_ERR_MSG_ATTR(extack, data[IFLA_AMT_MODE], "Mode attribute is not valid"); return -EINVAL; } if (!data[IFLA_AMT_LOCAL_IP]) { NL_SET_ERR_MSG_ATTR(extack, data[IFLA_AMT_DISCOVERY_IP], "Local attribute is required"); return -EINVAL; } if (!data[IFLA_AMT_DISCOVERY_IP] && nla_get_u32(data[IFLA_AMT_MODE]) == AMT_MODE_GATEWAY) { NL_SET_ERR_MSG_ATTR(extack, data[IFLA_AMT_LOCAL_IP], "Discovery attribute is required"); return -EINVAL; } return 0; } static int amt_newlink(struct net *net, struct net_device *dev, struct nlattr *tb[], struct nlattr *data[], struct netlink_ext_ack *extack) { struct amt_dev 
*amt = netdev_priv(dev); int err = -EINVAL; amt->net = net; amt->mode = nla_get_u32(data[IFLA_AMT_MODE]); if (data[IFLA_AMT_MAX_TUNNELS] && nla_get_u32(data[IFLA_AMT_MAX_TUNNELS])) amt->max_tunnels = nla_get_u32(data[IFLA_AMT_MAX_TUNNELS]); else amt->max_tunnels = AMT_MAX_TUNNELS; spin_lock_init(&amt->lock); amt->max_groups = AMT_MAX_GROUP; amt->max_sources = AMT_MAX_SOURCE; amt->hash_buckets = AMT_HSIZE; amt->nr_tunnels = 0; get_random_bytes(&amt->hash_seed, sizeof(amt->hash_seed)); amt->stream_dev = dev_get_by_index(net, nla_get_u32(data[IFLA_AMT_LINK])); if (!amt->stream_dev) { NL_SET_ERR_MSG_ATTR(extack, tb[IFLA_AMT_LINK], "Can't find stream device"); return -ENODEV; } if (amt->stream_dev->type != ARPHRD_ETHER) { NL_SET_ERR_MSG_ATTR(extack, tb[IFLA_AMT_LINK], "Invalid stream device type"); goto err; } amt->local_ip = nla_get_in_addr(data[IFLA_AMT_LOCAL_IP]); if (ipv4_is_loopback(amt->local_ip) || ipv4_is_zeronet(amt->local_ip) || ipv4_is_multicast(amt->local_ip)) { NL_SET_ERR_MSG_ATTR(extack, tb[IFLA_AMT_LOCAL_IP], "Invalid Local address"); goto err; } if (data[IFLA_AMT_RELAY_PORT]) amt->relay_port = nla_get_be16(data[IFLA_AMT_RELAY_PORT]); else amt->relay_port = htons(IANA_AMT_UDP_PORT); if (data[IFLA_AMT_GATEWAY_PORT]) amt->gw_port = nla_get_be16(data[IFLA_AMT_GATEWAY_PORT]); else amt->gw_port = htons(IANA_AMT_UDP_PORT); if (!amt->relay_port) { NL_SET_ERR_MSG_ATTR(extack, tb[IFLA_AMT_DISCOVERY_IP], "relay port must not be 0"); goto err; } if (amt->mode == AMT_MODE_RELAY) { amt->qrv = READ_ONCE(amt->net->ipv4.sysctl_igmp_qrv); amt->qri = 10; dev->needed_headroom = amt->stream_dev->needed_headroom + AMT_RELAY_HLEN; dev->mtu = amt->stream_dev->mtu - AMT_RELAY_HLEN; dev->max_mtu = dev->mtu; dev->min_mtu = ETH_MIN_MTU + AMT_RELAY_HLEN; } else { if (!data[IFLA_AMT_DISCOVERY_IP]) { NL_SET_ERR_MSG_ATTR(extack, tb[IFLA_AMT_DISCOVERY_IP], "discovery must be set in gateway mode"); goto err; } if (!amt->gw_port) { NL_SET_ERR_MSG_ATTR(extack, tb[IFLA_AMT_DISCOVERY_IP], "gateway port must not be 0"); goto err; } amt->remote_ip = 0; amt->discovery_ip = nla_get_in_addr(data[IFLA_AMT_DISCOVERY_IP]); if (ipv4_is_loopback(amt->discovery_ip) || ipv4_is_zeronet(amt->discovery_ip) || ipv4_is_multicast(amt->discovery_ip)) { NL_SET_ERR_MSG_ATTR(extack, tb[IFLA_AMT_DISCOVERY_IP], "discovery must be unicast"); goto err; } dev->needed_headroom = amt->stream_dev->needed_headroom + AMT_GW_HLEN; dev->mtu = amt->stream_dev->mtu - AMT_GW_HLEN; dev->max_mtu = dev->mtu; dev->min_mtu = ETH_MIN_MTU + AMT_GW_HLEN; } amt->qi = AMT_INIT_QUERY_INTERVAL; err = register_netdevice(dev); if (err < 0) { netdev_dbg(dev, "failed to register new netdev %d\n", err); goto err; } err = netdev_upper_dev_link(amt->stream_dev, dev, extack); if (err < 0) { unregister_netdevice(dev); goto err; } INIT_DELAYED_WORK(&amt->discovery_wq, amt_discovery_work); INIT_DELAYED_WORK(&amt->req_wq, amt_req_work); INIT_DELAYED_WORK(&amt->secret_wq, amt_secret_work); INIT_WORK(&amt->event_wq, amt_event_work); INIT_LIST_HEAD(&amt->tunnel_list); return 0; err: dev_put(amt->stream_dev); return err; } static void amt_dellink(struct net_device *dev, struct list_head *head) { struct amt_dev *amt = netdev_priv(dev); unregister_netdevice_queue(dev, head); netdev_upper_dev_unlink(amt->stream_dev, dev); dev_put(amt->stream_dev); } static size_t amt_get_size(const struct net_device *dev) { return nla_total_size(sizeof(__u32)) + /* IFLA_AMT_MODE */ nla_total_size(sizeof(__u16)) + /* IFLA_AMT_RELAY_PORT */ nla_total_size(sizeof(__u16)) + /* IFLA_AMT_GATEWAY_PORT */ 
	       nla_total_size(sizeof(__u32)) + /* IFLA_AMT_LINK */
	       nla_total_size(sizeof(__u32)) + /* IFLA_AMT_MAX_TUNNELS */
	       nla_total_size(sizeof(struct iphdr)) + /* IFLA_AMT_DISCOVERY_IP */
	       nla_total_size(sizeof(struct iphdr)) + /* IFLA_AMT_REMOTE_IP */
	       nla_total_size(sizeof(struct iphdr)); /* IFLA_AMT_LOCAL_IP */
}

static int amt_fill_info(struct sk_buff *skb, const struct net_device *dev)
{
	struct amt_dev *amt = netdev_priv(dev);

	if (nla_put_u32(skb, IFLA_AMT_MODE, amt->mode))
		goto nla_put_failure;
	if (nla_put_be16(skb, IFLA_AMT_RELAY_PORT, amt->relay_port))
		goto nla_put_failure;
	if (nla_put_be16(skb, IFLA_AMT_GATEWAY_PORT, amt->gw_port))
		goto nla_put_failure;
	if (nla_put_u32(skb, IFLA_AMT_LINK, amt->stream_dev->ifindex))
		goto nla_put_failure;
	if (nla_put_in_addr(skb, IFLA_AMT_LOCAL_IP, amt->local_ip))
		goto nla_put_failure;
	if (nla_put_in_addr(skb, IFLA_AMT_DISCOVERY_IP, amt->discovery_ip))
		goto nla_put_failure;
	if (amt->remote_ip)
		if (nla_put_in_addr(skb, IFLA_AMT_REMOTE_IP, amt->remote_ip))
			goto nla_put_failure;
	if (nla_put_u32(skb, IFLA_AMT_MAX_TUNNELS, amt->max_tunnels))
		goto nla_put_failure;

	return 0;

nla_put_failure:
	return -EMSGSIZE;
}

static struct rtnl_link_ops amt_link_ops __read_mostly = {
	.kind		= "amt",
	.maxtype	= IFLA_AMT_MAX,
	.policy		= amt_policy,
	.priv_size	= sizeof(struct amt_dev),
	.setup		= amt_link_setup,
	.validate	= amt_validate,
	.newlink	= amt_newlink,
	.dellink	= amt_dellink,
	.get_size	= amt_get_size,
	.fill_info	= amt_fill_info,
};

static struct net_device *amt_lookup_upper_dev(struct net_device *dev)
{
	struct net_device *upper_dev;
	struct amt_dev *amt;

	for_each_netdev(dev_net(dev), upper_dev) {
		if (netif_is_amt(upper_dev)) {
			amt = netdev_priv(upper_dev);
			if (amt->stream_dev == dev)
				return upper_dev;
		}
	}

	return NULL;
}

static int amt_device_event(struct notifier_block *unused,
			    unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	struct net_device *upper_dev;
	struct amt_dev *amt;
	LIST_HEAD(list);
	int new_mtu;

	upper_dev = amt_lookup_upper_dev(dev);
	if (!upper_dev)
		return NOTIFY_DONE;
	amt = netdev_priv(upper_dev);

	switch (event) {
	case NETDEV_UNREGISTER:
		amt_dellink(amt->dev, &list);
		unregister_netdevice_many(&list);
		break;
	case NETDEV_CHANGEMTU:
		if (amt->mode == AMT_MODE_RELAY)
			new_mtu = dev->mtu - AMT_RELAY_HLEN;
		else
			new_mtu = dev->mtu - AMT_GW_HLEN;

		dev_set_mtu(amt->dev, new_mtu);
		break;
	}

	return NOTIFY_DONE;
}

static struct notifier_block amt_notifier_block __read_mostly = {
	.notifier_call = amt_device_event,
};

static int __init amt_init(void)
{
	int err;

	err = register_netdevice_notifier(&amt_notifier_block);
	if (err < 0)
		goto err;

	err = rtnl_link_register(&amt_link_ops);
	if (err < 0)
		goto unregister_notifier;

	amt_wq = alloc_workqueue("amt", WQ_UNBOUND, 0);
	if (!amt_wq) {
		err = -ENOMEM;
		goto rtnl_unregister;
	}

	spin_lock_init(&source_gc_lock);
	spin_lock_bh(&source_gc_lock);
	INIT_DELAYED_WORK(&source_gc_wq, amt_source_gc_work);
	mod_delayed_work(amt_wq, &source_gc_wq,
			 msecs_to_jiffies(AMT_GC_INTERVAL));
	spin_unlock_bh(&source_gc_lock);

	return 0;

rtnl_unregister:
	rtnl_link_unregister(&amt_link_ops);
unregister_notifier:
	unregister_netdevice_notifier(&amt_notifier_block);
err:
	pr_err("error loading AMT module\n");
	return err;
}
late_initcall(amt_init);

static void __exit amt_fini(void)
{
	rtnl_link_unregister(&amt_link_ops);
	unregister_netdevice_notifier(&amt_notifier_block);
	cancel_delayed_work_sync(&source_gc_wq);
	__amt_source_gc_work();
	destroy_workqueue(amt_wq);
}
module_exit(amt_fini);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Taehee Yoo <[email protected]>");
MODULE_ALIAS_RTNL_LINK("amt");
linux-master
drivers/net/amt.c
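For reference, a minimal sketch (not part of the file above) of the 48-bit response-MAC derivation that amt_request_handler() and amt_update_handler() rely on; the helper name and the __maybe_unused annotation are hypothetical, and it assumes only the siphash primitives the driver already uses.

/* Hypothetical illustration, not part of drivers/net/amt.c. */
static u64 __maybe_unused amt_example_response_mac(__be32 peer_ip, __be16 peer_port,
						   __be32 nonce,
						   const siphash_key_t *key)
{
	/* Same inputs as the relay side: tunnel source IP, source UDP port
	 * and the gateway nonce, keyed with the relay's secret. Only the
	 * upper 48 bits of the 64-bit siphash are kept, matching
	 * "tunnel->mac = mac >> 16" in amt_request_handler().
	 */
	return siphash_3u32((__force u32)peer_ip, (__force u32)peer_port,
			    (__force u32)nonce, key) >> 16;
}

A membership update is accepted only when the MAC recomputed this way, together with the stored nonce, matches the response_mac carried in the message, which is the comparison amt_update_handler() performs.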
// SPDX-License-Identifier: GPL-2.0-or-later /* GTP according to GSM TS 09.60 / 3GPP TS 29.060 * * (C) 2012-2014 by sysmocom - s.f.m.c. GmbH * (C) 2016 by Pablo Neira Ayuso <[email protected]> * * Author: Harald Welte <[email protected]> * Pablo Neira Ayuso <[email protected]> * Andreas Schultz <[email protected]> */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/module.h> #include <linux/skbuff.h> #include <linux/udp.h> #include <linux/rculist.h> #include <linux/jhash.h> #include <linux/if_tunnel.h> #include <linux/net.h> #include <linux/file.h> #include <linux/gtp.h> #include <net/net_namespace.h> #include <net/protocol.h> #include <net/ip.h> #include <net/udp.h> #include <net/udp_tunnel.h> #include <net/icmp.h> #include <net/xfrm.h> #include <net/genetlink.h> #include <net/netns/generic.h> #include <net/gtp.h> /* An active session for the subscriber. */ struct pdp_ctx { struct hlist_node hlist_tid; struct hlist_node hlist_addr; union { struct { u64 tid; u16 flow; } v0; struct { u32 i_tei; u32 o_tei; } v1; } u; u8 gtp_version; u16 af; struct in_addr ms_addr_ip4; struct in_addr peer_addr_ip4; struct sock *sk; struct net_device *dev; atomic_t tx_seq; struct rcu_head rcu_head; }; /* One instance of the GTP device. */ struct gtp_dev { struct list_head list; struct sock *sk0; struct sock *sk1u; u8 sk_created; struct net_device *dev; struct net *net; unsigned int role; unsigned int hash_size; struct hlist_head *tid_hash; struct hlist_head *addr_hash; u8 restart_count; }; struct echo_info { struct in_addr ms_addr_ip4; struct in_addr peer_addr_ip4; u8 gtp_version; }; static unsigned int gtp_net_id __read_mostly; struct gtp_net { struct list_head gtp_dev_list; }; static u32 gtp_h_initval; static struct genl_family gtp_genl_family; enum gtp_multicast_groups { GTP_GENL_MCGRP, }; static const struct genl_multicast_group gtp_genl_mcgrps[] = { [GTP_GENL_MCGRP] = { .name = GTP_GENL_MCGRP_NAME }, }; static void pdp_context_delete(struct pdp_ctx *pctx); static inline u32 gtp0_hashfn(u64 tid) { u32 *tid32 = (u32 *) &tid; return jhash_2words(tid32[0], tid32[1], gtp_h_initval); } static inline u32 gtp1u_hashfn(u32 tid) { return jhash_1word(tid, gtp_h_initval); } static inline u32 ipv4_hashfn(__be32 ip) { return jhash_1word((__force u32)ip, gtp_h_initval); } /* Resolve a PDP context structure based on the 64bit TID. */ static struct pdp_ctx *gtp0_pdp_find(struct gtp_dev *gtp, u64 tid) { struct hlist_head *head; struct pdp_ctx *pdp; head = &gtp->tid_hash[gtp0_hashfn(tid) % gtp->hash_size]; hlist_for_each_entry_rcu(pdp, head, hlist_tid) { if (pdp->gtp_version == GTP_V0 && pdp->u.v0.tid == tid) return pdp; } return NULL; } /* Resolve a PDP context structure based on the 32bit TEI. */ static struct pdp_ctx *gtp1_pdp_find(struct gtp_dev *gtp, u32 tid) { struct hlist_head *head; struct pdp_ctx *pdp; head = &gtp->tid_hash[gtp1u_hashfn(tid) % gtp->hash_size]; hlist_for_each_entry_rcu(pdp, head, hlist_tid) { if (pdp->gtp_version == GTP_V1 && pdp->u.v1.i_tei == tid) return pdp; } return NULL; } /* Resolve a PDP context based on IPv4 address of MS. 
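 * ("MS" is the mobile station, i.e. the subscriber; the lookup hashes the
 * subscriber's IPv4 address with ipv4_hashfn() into the addr_hash table.)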
*/ static struct pdp_ctx *ipv4_pdp_find(struct gtp_dev *gtp, __be32 ms_addr) { struct hlist_head *head; struct pdp_ctx *pdp; head = &gtp->addr_hash[ipv4_hashfn(ms_addr) % gtp->hash_size]; hlist_for_each_entry_rcu(pdp, head, hlist_addr) { if (pdp->af == AF_INET && pdp->ms_addr_ip4.s_addr == ms_addr) return pdp; } return NULL; } static bool gtp_check_ms_ipv4(struct sk_buff *skb, struct pdp_ctx *pctx, unsigned int hdrlen, unsigned int role) { struct iphdr *iph; if (!pskb_may_pull(skb, hdrlen + sizeof(struct iphdr))) return false; iph = (struct iphdr *)(skb->data + hdrlen); if (role == GTP_ROLE_SGSN) return iph->daddr == pctx->ms_addr_ip4.s_addr; else return iph->saddr == pctx->ms_addr_ip4.s_addr; } /* Check if the inner IP address in this packet is assigned to any * existing mobile subscriber. */ static bool gtp_check_ms(struct sk_buff *skb, struct pdp_ctx *pctx, unsigned int hdrlen, unsigned int role) { switch (ntohs(skb->protocol)) { case ETH_P_IP: return gtp_check_ms_ipv4(skb, pctx, hdrlen, role); } return false; } static int gtp_rx(struct pdp_ctx *pctx, struct sk_buff *skb, unsigned int hdrlen, unsigned int role) { if (!gtp_check_ms(skb, pctx, hdrlen, role)) { netdev_dbg(pctx->dev, "No PDP ctx for this MS\n"); return 1; } /* Get rid of the GTP + UDP headers. */ if (iptunnel_pull_header(skb, hdrlen, skb->protocol, !net_eq(sock_net(pctx->sk), dev_net(pctx->dev)))) { pctx->dev->stats.rx_length_errors++; goto err; } netdev_dbg(pctx->dev, "forwarding packet from GGSN to uplink\n"); /* Now that the UDP and the GTP header have been removed, set up the * new network header. This is required by the upper layer to * calculate the transport header. */ skb_reset_network_header(skb); skb_reset_mac_header(skb); skb->dev = pctx->dev; dev_sw_netstats_rx_add(pctx->dev, skb->len); __netif_rx(skb); return 0; err: pctx->dev->stats.rx_dropped++; return -1; } static struct rtable *ip4_route_output_gtp(struct flowi4 *fl4, const struct sock *sk, __be32 daddr, __be32 saddr) { memset(fl4, 0, sizeof(*fl4)); fl4->flowi4_oif = sk->sk_bound_dev_if; fl4->daddr = daddr; fl4->saddr = saddr; fl4->flowi4_tos = ip_sock_rt_tos(sk); fl4->flowi4_scope = ip_sock_rt_scope(sk); fl4->flowi4_proto = sk->sk_protocol; return ip_route_output_key(sock_net(sk), fl4); } /* GSM TS 09.60. 7.3 * In all Path Management messages: * - TID: is not used and shall be set to 0. * - Flow Label is not used and shall be set to 0 * In signalling messages: * - number: this field is not yet used in signalling messages. * It shall be set to 255 by the sender and shall be ignored * by the receiver * Returns true if the echo req was correct, false otherwise. */ static bool gtp0_validate_echo_hdr(struct gtp0_header *gtp0) { return !(gtp0->tid || (gtp0->flags ^ 0x1e) || gtp0->number != 0xff || gtp0->flow); } /* msg_type has to be GTP_ECHO_REQ or GTP_ECHO_RSP */ static void gtp0_build_echo_msg(struct gtp0_header *hdr, __u8 msg_type) { int len_pkt, len_hdr; hdr->flags = 0x1e; /* v0, GTP-non-prime. */ hdr->type = msg_type; /* GSM TS 09.60. 7.3 In all Path Management Flow Label and TID * are not used and shall be set to 0. 
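 * (The 0x1e flags byte written above reflects the same fixed header:
 * version 0, PT=1 for GTP, spare bits '111' and no SNDCP N-PDU number,
 * assuming the usual GSM TS 09.60 flag layout.)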
*/ hdr->flow = 0; hdr->tid = 0; hdr->number = 0xff; hdr->spare[0] = 0xff; hdr->spare[1] = 0xff; hdr->spare[2] = 0xff; len_pkt = sizeof(struct gtp0_packet); len_hdr = sizeof(struct gtp0_header); if (msg_type == GTP_ECHO_RSP) hdr->length = htons(len_pkt - len_hdr); else hdr->length = 0; } static int gtp0_send_echo_resp(struct gtp_dev *gtp, struct sk_buff *skb) { struct gtp0_packet *gtp_pkt; struct gtp0_header *gtp0; struct rtable *rt; struct flowi4 fl4; struct iphdr *iph; __be16 seq; gtp0 = (struct gtp0_header *)(skb->data + sizeof(struct udphdr)); if (!gtp0_validate_echo_hdr(gtp0)) return -1; seq = gtp0->seq; /* pull GTP and UDP headers */ skb_pull_data(skb, sizeof(struct gtp0_header) + sizeof(struct udphdr)); gtp_pkt = skb_push(skb, sizeof(struct gtp0_packet)); memset(gtp_pkt, 0, sizeof(struct gtp0_packet)); gtp0_build_echo_msg(&gtp_pkt->gtp0_h, GTP_ECHO_RSP); /* GSM TS 09.60. 7.3 The Sequence Number in a signalling response * message shall be copied from the signalling request message * that the GSN is replying to. */ gtp_pkt->gtp0_h.seq = seq; gtp_pkt->ie.tag = GTPIE_RECOVERY; gtp_pkt->ie.val = gtp->restart_count; iph = ip_hdr(skb); /* find route to the sender, * src address becomes dst address and vice versa. */ rt = ip4_route_output_gtp(&fl4, gtp->sk0, iph->saddr, iph->daddr); if (IS_ERR(rt)) { netdev_dbg(gtp->dev, "no route for echo response from %pI4\n", &iph->saddr); return -1; } udp_tunnel_xmit_skb(rt, gtp->sk0, skb, fl4.saddr, fl4.daddr, iph->tos, ip4_dst_hoplimit(&rt->dst), 0, htons(GTP0_PORT), htons(GTP0_PORT), !net_eq(sock_net(gtp->sk1u), dev_net(gtp->dev)), false); return 0; } static int gtp_genl_fill_echo(struct sk_buff *skb, u32 snd_portid, u32 snd_seq, int flags, u32 type, struct echo_info echo) { void *genlh; genlh = genlmsg_put(skb, snd_portid, snd_seq, &gtp_genl_family, flags, type); if (!genlh) goto failure; if (nla_put_u32(skb, GTPA_VERSION, echo.gtp_version) || nla_put_be32(skb, GTPA_PEER_ADDRESS, echo.peer_addr_ip4.s_addr) || nla_put_be32(skb, GTPA_MS_ADDRESS, echo.ms_addr_ip4.s_addr)) goto failure; genlmsg_end(skb, genlh); return 0; failure: genlmsg_cancel(skb, genlh); return -EMSGSIZE; } static int gtp0_handle_echo_resp(struct gtp_dev *gtp, struct sk_buff *skb) { struct gtp0_header *gtp0; struct echo_info echo; struct sk_buff *msg; struct iphdr *iph; int ret; gtp0 = (struct gtp0_header *)(skb->data + sizeof(struct udphdr)); if (!gtp0_validate_echo_hdr(gtp0)) return -1; iph = ip_hdr(skb); echo.ms_addr_ip4.s_addr = iph->daddr; echo.peer_addr_ip4.s_addr = iph->saddr; echo.gtp_version = GTP_V0; msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_ATOMIC); if (!msg) return -ENOMEM; ret = gtp_genl_fill_echo(msg, 0, 0, 0, GTP_CMD_ECHOREQ, echo); if (ret < 0) { nlmsg_free(msg); return ret; } return genlmsg_multicast_netns(&gtp_genl_family, dev_net(gtp->dev), msg, 0, GTP_GENL_MCGRP, GFP_ATOMIC); } /* 1 means pass up to the stack, -1 means drop and 0 means decapsulated. */ static int gtp0_udp_encap_recv(struct gtp_dev *gtp, struct sk_buff *skb) { unsigned int hdrlen = sizeof(struct udphdr) + sizeof(struct gtp0_header); struct gtp0_header *gtp0; struct pdp_ctx *pctx; if (!pskb_may_pull(skb, hdrlen)) return -1; gtp0 = (struct gtp0_header *)(skb->data + sizeof(struct udphdr)); if ((gtp0->flags >> 5) != GTP_V0) return 1; /* If the sockets were created in kernel, it means that * there is no daemon running in userspace which would * handle echo request. 
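 * Otherwise the message falls through and is handed to the user-space
 * socket (the "return 1" paths below).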
*/ if (gtp0->type == GTP_ECHO_REQ && gtp->sk_created) return gtp0_send_echo_resp(gtp, skb); if (gtp0->type == GTP_ECHO_RSP && gtp->sk_created) return gtp0_handle_echo_resp(gtp, skb); if (gtp0->type != GTP_TPDU) return 1; pctx = gtp0_pdp_find(gtp, be64_to_cpu(gtp0->tid)); if (!pctx) { netdev_dbg(gtp->dev, "No PDP ctx to decap skb=%p\n", skb); return 1; } return gtp_rx(pctx, skb, hdrlen, gtp->role); } /* msg_type has to be GTP_ECHO_REQ or GTP_ECHO_RSP */ static void gtp1u_build_echo_msg(struct gtp1_header_long *hdr, __u8 msg_type) { int len_pkt, len_hdr; /* S flag must be set to 1 */ hdr->flags = 0x32; /* v1, GTP-non-prime. */ hdr->type = msg_type; /* 3GPP TS 29.281 5.1 - TEID has to be set to 0 */ hdr->tid = 0; /* seq, npdu and next should be counted to the length of the GTP packet * that's why szie of gtp1_header should be subtracted, * not size of gtp1_header_long. */ len_hdr = sizeof(struct gtp1_header); if (msg_type == GTP_ECHO_RSP) { len_pkt = sizeof(struct gtp1u_packet); hdr->length = htons(len_pkt - len_hdr); } else { /* GTP_ECHO_REQ does not carry GTP Information Element, * the why gtp1_header_long is used here. */ len_pkt = sizeof(struct gtp1_header_long); hdr->length = htons(len_pkt - len_hdr); } } static int gtp1u_send_echo_resp(struct gtp_dev *gtp, struct sk_buff *skb) { struct gtp1_header_long *gtp1u; struct gtp1u_packet *gtp_pkt; struct rtable *rt; struct flowi4 fl4; struct iphdr *iph; gtp1u = (struct gtp1_header_long *)(skb->data + sizeof(struct udphdr)); /* 3GPP TS 29.281 5.1 - For the Echo Request, Echo Response, * Error Indication and Supported Extension Headers Notification * messages, the S flag shall be set to 1 and TEID shall be set to 0. */ if (!(gtp1u->flags & GTP1_F_SEQ) || gtp1u->tid) return -1; /* pull GTP and UDP headers */ skb_pull_data(skb, sizeof(struct gtp1_header_long) + sizeof(struct udphdr)); gtp_pkt = skb_push(skb, sizeof(struct gtp1u_packet)); memset(gtp_pkt, 0, sizeof(struct gtp1u_packet)); gtp1u_build_echo_msg(&gtp_pkt->gtp1u_h, GTP_ECHO_RSP); /* 3GPP TS 29.281 7.7.2 - The Restart Counter value in the * Recovery information element shall not be used, i.e. it shall * be set to zero by the sender and shall be ignored by the receiver. * The Recovery information element is mandatory due to backwards * compatibility reasons. */ gtp_pkt->ie.tag = GTPIE_RECOVERY; gtp_pkt->ie.val = 0; iph = ip_hdr(skb); /* find route to the sender, * src address becomes dst address and vice versa. */ rt = ip4_route_output_gtp(&fl4, gtp->sk1u, iph->saddr, iph->daddr); if (IS_ERR(rt)) { netdev_dbg(gtp->dev, "no route for echo response from %pI4\n", &iph->saddr); return -1; } udp_tunnel_xmit_skb(rt, gtp->sk1u, skb, fl4.saddr, fl4.daddr, iph->tos, ip4_dst_hoplimit(&rt->dst), 0, htons(GTP1U_PORT), htons(GTP1U_PORT), !net_eq(sock_net(gtp->sk1u), dev_net(gtp->dev)), false); return 0; } static int gtp1u_handle_echo_resp(struct gtp_dev *gtp, struct sk_buff *skb) { struct gtp1_header_long *gtp1u; struct echo_info echo; struct sk_buff *msg; struct iphdr *iph; int ret; gtp1u = (struct gtp1_header_long *)(skb->data + sizeof(struct udphdr)); /* 3GPP TS 29.281 5.1 - For the Echo Request, Echo Response, * Error Indication and Supported Extension Headers Notification * messages, the S flag shall be set to 1 and TEID shall be set to 0. 
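 * A conforming response therefore carries the S bit in its flags (as in
 * the 0x32 value gtp1u_build_echo_msg() emits) and a zero TEID, which is
 * what the check below enforces.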
*/ if (!(gtp1u->flags & GTP1_F_SEQ) || gtp1u->tid) return -1; iph = ip_hdr(skb); echo.ms_addr_ip4.s_addr = iph->daddr; echo.peer_addr_ip4.s_addr = iph->saddr; echo.gtp_version = GTP_V1; msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_ATOMIC); if (!msg) return -ENOMEM; ret = gtp_genl_fill_echo(msg, 0, 0, 0, GTP_CMD_ECHOREQ, echo); if (ret < 0) { nlmsg_free(msg); return ret; } return genlmsg_multicast_netns(&gtp_genl_family, dev_net(gtp->dev), msg, 0, GTP_GENL_MCGRP, GFP_ATOMIC); } static int gtp1u_udp_encap_recv(struct gtp_dev *gtp, struct sk_buff *skb) { unsigned int hdrlen = sizeof(struct udphdr) + sizeof(struct gtp1_header); struct gtp1_header *gtp1; struct pdp_ctx *pctx; if (!pskb_may_pull(skb, hdrlen)) return -1; gtp1 = (struct gtp1_header *)(skb->data + sizeof(struct udphdr)); if ((gtp1->flags >> 5) != GTP_V1) return 1; /* If the sockets were created in kernel, it means that * there is no daemon running in userspace which would * handle echo requests. */ if (gtp1->type == GTP_ECHO_REQ && gtp->sk_created) return gtp1u_send_echo_resp(gtp, skb); if (gtp1->type == GTP_ECHO_RSP && gtp->sk_created) return gtp1u_handle_echo_resp(gtp, skb); if (gtp1->type != GTP_TPDU) return 1; /* From 29.060: "This field shall be present if and only if any one or * more of the S, PN and E flags are set.". * * If any one of the S, PN or E flags is set, the optional 4-octet field * (sequence number, N-PDU number, next extension header type) is * present as a whole. */ if (gtp1->flags & GTP1_F_MASK) hdrlen += 4; /* Make sure the header is large enough, including extensions. */ if (!pskb_may_pull(skb, hdrlen)) return -1; gtp1 = (struct gtp1_header *)(skb->data + sizeof(struct udphdr)); pctx = gtp1_pdp_find(gtp, ntohl(gtp1->tid)); if (!pctx) { netdev_dbg(gtp->dev, "No PDP ctx to decap skb=%p\n", skb); return 1; } return gtp_rx(pctx, skb, hdrlen, gtp->role); } static void __gtp_encap_destroy(struct sock *sk) { struct gtp_dev *gtp; lock_sock(sk); gtp = sk->sk_user_data; if (gtp) { if (gtp->sk0 == sk) gtp->sk0 = NULL; else gtp->sk1u = NULL; udp_sk(sk)->encap_type = 0; rcu_assign_sk_user_data(sk, NULL); release_sock(sk); sock_put(sk); return; } release_sock(sk); } static void gtp_encap_destroy(struct sock *sk) { rtnl_lock(); __gtp_encap_destroy(sk); rtnl_unlock(); } static void gtp_encap_disable_sock(struct sock *sk) { if (!sk) return; __gtp_encap_destroy(sk); } static void gtp_encap_disable(struct gtp_dev *gtp) { if (gtp->sk_created) { udp_tunnel_sock_release(gtp->sk0->sk_socket); udp_tunnel_sock_release(gtp->sk1u->sk_socket); gtp->sk_created = false; gtp->sk0 = NULL; gtp->sk1u = NULL; } else { gtp_encap_disable_sock(gtp->sk0); gtp_encap_disable_sock(gtp->sk1u); } } /* UDP encapsulation receive handler. See net/ipv4/udp.c. * Return codes: 0: success, <0: error, >0: pass up to userspace UDP socket. */ static int gtp_encap_recv(struct sock *sk, struct sk_buff *skb) { struct gtp_dev *gtp; int ret = 0; gtp = rcu_dereference_sk_user_data(sk); if (!gtp) return 1; netdev_dbg(gtp->dev, "encap_recv sk=%p\n", sk); switch (udp_sk(sk)->encap_type) { case UDP_ENCAP_GTP0: netdev_dbg(gtp->dev, "received GTP0 packet\n"); ret = gtp0_udp_encap_recv(gtp, skb); break; case UDP_ENCAP_GTP1U: netdev_dbg(gtp->dev, "received GTP1U packet\n"); ret = gtp1u_udp_encap_recv(gtp, skb); break; default: ret = -1; /* Shouldn't happen. 
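 * encap_type is fixed to UDP_ENCAP_GTP0 or UDP_ENCAP_GTP1U when the socket is set up in gtp_create_sock() or gtp_encap_enable_socket().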
*/ } switch (ret) { case 1: netdev_dbg(gtp->dev, "pass up to the process\n"); break; case 0: break; case -1: netdev_dbg(gtp->dev, "GTP packet has been dropped\n"); kfree_skb(skb); ret = 0; break; } return ret; } static int gtp_dev_init(struct net_device *dev) { struct gtp_dev *gtp = netdev_priv(dev); gtp->dev = dev; dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats); if (!dev->tstats) return -ENOMEM; return 0; } static void gtp_dev_uninit(struct net_device *dev) { struct gtp_dev *gtp = netdev_priv(dev); gtp_encap_disable(gtp); free_percpu(dev->tstats); } static inline void gtp0_push_header(struct sk_buff *skb, struct pdp_ctx *pctx) { int payload_len = skb->len; struct gtp0_header *gtp0; gtp0 = skb_push(skb, sizeof(*gtp0)); gtp0->flags = 0x1e; /* v0, GTP-non-prime. */ gtp0->type = GTP_TPDU; gtp0->length = htons(payload_len); gtp0->seq = htons((atomic_inc_return(&pctx->tx_seq) - 1) % 0xffff); gtp0->flow = htons(pctx->u.v0.flow); gtp0->number = 0xff; gtp0->spare[0] = gtp0->spare[1] = gtp0->spare[2] = 0xff; gtp0->tid = cpu_to_be64(pctx->u.v0.tid); } static inline void gtp1_push_header(struct sk_buff *skb, struct pdp_ctx *pctx) { int payload_len = skb->len; struct gtp1_header *gtp1; gtp1 = skb_push(skb, sizeof(*gtp1)); /* Bits 8 7 6 5 4 3 2 1 * +--+--+--+--+--+--+--+--+ * |version |PT| 0| E| S|PN| * +--+--+--+--+--+--+--+--+ * 0 0 1 1 1 0 0 0 */ gtp1->flags = 0x30; /* v1, GTP-non-prime. */ gtp1->type = GTP_TPDU; gtp1->length = htons(payload_len); gtp1->tid = htonl(pctx->u.v1.o_tei); /* TODO: Support for extension header, sequence number and N-PDU. * Update the length field if any of them is available. */ } struct gtp_pktinfo { struct sock *sk; struct iphdr *iph; struct flowi4 fl4; struct rtable *rt; struct pdp_ctx *pctx; struct net_device *dev; __be16 gtph_port; }; static void gtp_push_header(struct sk_buff *skb, struct gtp_pktinfo *pktinfo) { switch (pktinfo->pctx->gtp_version) { case GTP_V0: pktinfo->gtph_port = htons(GTP0_PORT); gtp0_push_header(skb, pktinfo->pctx); break; case GTP_V1: pktinfo->gtph_port = htons(GTP1U_PORT); gtp1_push_header(skb, pktinfo->pctx); break; } } static inline void gtp_set_pktinfo_ipv4(struct gtp_pktinfo *pktinfo, struct sock *sk, struct iphdr *iph, struct pdp_ctx *pctx, struct rtable *rt, struct flowi4 *fl4, struct net_device *dev) { pktinfo->sk = sk; pktinfo->iph = iph; pktinfo->pctx = pctx; pktinfo->rt = rt; pktinfo->fl4 = *fl4; pktinfo->dev = dev; } static int gtp_build_skb_ip4(struct sk_buff *skb, struct net_device *dev, struct gtp_pktinfo *pktinfo) { struct gtp_dev *gtp = netdev_priv(dev); struct pdp_ctx *pctx; struct rtable *rt; struct flowi4 fl4; struct iphdr *iph; __be16 df; int mtu; /* Read the IP destination address and resolve the PDP context. * Prepend PDP header with TEI/TID from PDP ctx. */ iph = ip_hdr(skb); if (gtp->role == GTP_ROLE_SGSN) pctx = ipv4_pdp_find(gtp, iph->saddr); else pctx = ipv4_pdp_find(gtp, iph->daddr); if (!pctx) { netdev_dbg(dev, "no PDP ctx found for %pI4, skip\n", &iph->daddr); return -ENOENT; } netdev_dbg(dev, "found PDP context %p\n", pctx); rt = ip4_route_output_gtp(&fl4, pctx->sk, pctx->peer_addr_ip4.s_addr, inet_sk(pctx->sk)->inet_saddr); if (IS_ERR(rt)) { netdev_dbg(dev, "no route to SSGN %pI4\n", &pctx->peer_addr_ip4.s_addr); dev->stats.tx_carrier_errors++; goto err; } if (rt->dst.dev == dev) { netdev_dbg(dev, "circular route to SSGN %pI4\n", &pctx->peer_addr_ip4.s_addr); dev->stats.collisions++; goto err_rt; } /* This is similar to tnl_update_pmtu(). 
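 * With DF set on the inner header, the encapsulation overhead (link header, outer IPv4, UDP and the version-specific GTP header) is subtracted from the route MTU, the result is propagated to the skb's dst entry, and an ICMP fragmentation-needed error is sent for oversized non-GSO packets.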
*/ df = iph->frag_off; if (df) { mtu = dst_mtu(&rt->dst) - dev->hard_header_len - sizeof(struct iphdr) - sizeof(struct udphdr); switch (pctx->gtp_version) { case GTP_V0: mtu -= sizeof(struct gtp0_header); break; case GTP_V1: mtu -= sizeof(struct gtp1_header); break; } } else { mtu = dst_mtu(&rt->dst); } skb_dst_update_pmtu_no_confirm(skb, mtu); if (!skb_is_gso(skb) && (iph->frag_off & htons(IP_DF)) && mtu < ntohs(iph->tot_len)) { netdev_dbg(dev, "packet too big, fragmentation needed\n"); icmp_ndo_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED, htonl(mtu)); goto err_rt; } gtp_set_pktinfo_ipv4(pktinfo, pctx->sk, iph, pctx, rt, &fl4, dev); gtp_push_header(skb, pktinfo); return 0; err_rt: ip_rt_put(rt); err: return -EBADMSG; } static netdev_tx_t gtp_dev_xmit(struct sk_buff *skb, struct net_device *dev) { unsigned int proto = ntohs(skb->protocol); struct gtp_pktinfo pktinfo; int err; /* Ensure there is sufficient headroom. */ if (skb_cow_head(skb, dev->needed_headroom)) goto tx_err; skb_reset_inner_headers(skb); /* PDP context lookups in gtp_build_skb_*() need rcu read-side lock. */ rcu_read_lock(); switch (proto) { case ETH_P_IP: err = gtp_build_skb_ip4(skb, dev, &pktinfo); break; default: err = -EOPNOTSUPP; break; } rcu_read_unlock(); if (err < 0) goto tx_err; switch (proto) { case ETH_P_IP: netdev_dbg(pktinfo.dev, "gtp -> IP src: %pI4 dst: %pI4\n", &pktinfo.iph->saddr, &pktinfo.iph->daddr); udp_tunnel_xmit_skb(pktinfo.rt, pktinfo.sk, skb, pktinfo.fl4.saddr, pktinfo.fl4.daddr, pktinfo.iph->tos, ip4_dst_hoplimit(&pktinfo.rt->dst), 0, pktinfo.gtph_port, pktinfo.gtph_port, !net_eq(sock_net(pktinfo.pctx->sk), dev_net(dev)), false); break; } return NETDEV_TX_OK; tx_err: dev->stats.tx_errors++; dev_kfree_skb(skb); return NETDEV_TX_OK; } static const struct net_device_ops gtp_netdev_ops = { .ndo_init = gtp_dev_init, .ndo_uninit = gtp_dev_uninit, .ndo_start_xmit = gtp_dev_xmit, .ndo_get_stats64 = dev_get_tstats64, }; static const struct device_type gtp_type = { .name = "gtp", }; static void gtp_link_setup(struct net_device *dev) { unsigned int max_gtp_header_len = sizeof(struct iphdr) + sizeof(struct udphdr) + sizeof(struct gtp0_header); dev->netdev_ops = &gtp_netdev_ops; dev->needs_free_netdev = true; SET_NETDEV_DEVTYPE(dev, &gtp_type); dev->hard_header_len = 0; dev->addr_len = 0; dev->mtu = ETH_DATA_LEN - max_gtp_header_len; /* Zero header length. 
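 * The device exposes no link-layer header of its own; the GTP/UDP/IPv4 encapsulation overhead is reflected in the default MTU above and in needed_headroom below.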
*/ dev->type = ARPHRD_NONE; dev->flags = IFF_POINTOPOINT | IFF_NOARP | IFF_MULTICAST; dev->priv_flags |= IFF_NO_QUEUE; dev->features |= NETIF_F_LLTX; netif_keep_dst(dev); dev->needed_headroom = LL_MAX_HEADER + max_gtp_header_len; } static int gtp_hashtable_new(struct gtp_dev *gtp, int hsize); static int gtp_encap_enable(struct gtp_dev *gtp, struct nlattr *data[]); static void gtp_destructor(struct net_device *dev) { struct gtp_dev *gtp = netdev_priv(dev); kfree(gtp->addr_hash); kfree(gtp->tid_hash); } static struct sock *gtp_create_sock(int type, struct gtp_dev *gtp) { struct udp_tunnel_sock_cfg tuncfg = {}; struct udp_port_cfg udp_conf = { .local_ip.s_addr = htonl(INADDR_ANY), .family = AF_INET, }; struct net *net = gtp->net; struct socket *sock; int err; if (type == UDP_ENCAP_GTP0) udp_conf.local_udp_port = htons(GTP0_PORT); else if (type == UDP_ENCAP_GTP1U) udp_conf.local_udp_port = htons(GTP1U_PORT); else return ERR_PTR(-EINVAL); err = udp_sock_create(net, &udp_conf, &sock); if (err) return ERR_PTR(err); tuncfg.sk_user_data = gtp; tuncfg.encap_type = type; tuncfg.encap_rcv = gtp_encap_recv; tuncfg.encap_destroy = NULL; setup_udp_tunnel_sock(net, sock, &tuncfg); return sock->sk; } static int gtp_create_sockets(struct gtp_dev *gtp, struct nlattr *data[]) { struct sock *sk1u = NULL; struct sock *sk0 = NULL; sk0 = gtp_create_sock(UDP_ENCAP_GTP0, gtp); if (IS_ERR(sk0)) return PTR_ERR(sk0); sk1u = gtp_create_sock(UDP_ENCAP_GTP1U, gtp); if (IS_ERR(sk1u)) { udp_tunnel_sock_release(sk0->sk_socket); return PTR_ERR(sk1u); } gtp->sk_created = true; gtp->sk0 = sk0; gtp->sk1u = sk1u; return 0; } static int gtp_newlink(struct net *src_net, struct net_device *dev, struct nlattr *tb[], struct nlattr *data[], struct netlink_ext_ack *extack) { unsigned int role = GTP_ROLE_GGSN; struct gtp_dev *gtp; struct gtp_net *gn; int hashsize, err; gtp = netdev_priv(dev); if (!data[IFLA_GTP_PDP_HASHSIZE]) { hashsize = 1024; } else { hashsize = nla_get_u32(data[IFLA_GTP_PDP_HASHSIZE]); if (!hashsize) hashsize = 1024; } if (data[IFLA_GTP_ROLE]) { role = nla_get_u32(data[IFLA_GTP_ROLE]); if (role > GTP_ROLE_SGSN) return -EINVAL; } gtp->role = role; if (!data[IFLA_GTP_RESTART_COUNT]) gtp->restart_count = 0; else gtp->restart_count = nla_get_u8(data[IFLA_GTP_RESTART_COUNT]); gtp->net = src_net; err = gtp_hashtable_new(gtp, hashsize); if (err < 0) return err; if (data[IFLA_GTP_CREATE_SOCKETS]) err = gtp_create_sockets(gtp, data); else err = gtp_encap_enable(gtp, data); if (err < 0) goto out_hashtable; err = register_netdevice(dev); if (err < 0) { netdev_dbg(dev, "failed to register new netdev %d\n", err); goto out_encap; } gn = net_generic(dev_net(dev), gtp_net_id); list_add_rcu(&gtp->list, &gn->gtp_dev_list); dev->priv_destructor = gtp_destructor; netdev_dbg(dev, "registered new GTP interface\n"); return 0; out_encap: gtp_encap_disable(gtp); out_hashtable: kfree(gtp->addr_hash); kfree(gtp->tid_hash); return err; } static void gtp_dellink(struct net_device *dev, struct list_head *head) { struct gtp_dev *gtp = netdev_priv(dev); struct pdp_ctx *pctx; int i; for (i = 0; i < gtp->hash_size; i++) hlist_for_each_entry_rcu(pctx, &gtp->tid_hash[i], hlist_tid) pdp_context_delete(pctx); list_del_rcu(&gtp->list); unregister_netdevice_queue(dev, head); } static const struct nla_policy gtp_policy[IFLA_GTP_MAX + 1] = { [IFLA_GTP_FD0] = { .type = NLA_U32 }, [IFLA_GTP_FD1] = { .type = NLA_U32 }, [IFLA_GTP_PDP_HASHSIZE] = { .type = NLA_U32 }, [IFLA_GTP_ROLE] = { .type = NLA_U32 }, [IFLA_GTP_CREATE_SOCKETS] = { .type = NLA_U8 }, 
[IFLA_GTP_RESTART_COUNT] = { .type = NLA_U8 }, }; static int gtp_validate(struct nlattr *tb[], struct nlattr *data[], struct netlink_ext_ack *extack) { if (!data) return -EINVAL; return 0; } static size_t gtp_get_size(const struct net_device *dev) { return nla_total_size(sizeof(__u32)) + /* IFLA_GTP_PDP_HASHSIZE */ nla_total_size(sizeof(__u32)) + /* IFLA_GTP_ROLE */ nla_total_size(sizeof(__u8)); /* IFLA_GTP_RESTART_COUNT */ } static int gtp_fill_info(struct sk_buff *skb, const struct net_device *dev) { struct gtp_dev *gtp = netdev_priv(dev); if (nla_put_u32(skb, IFLA_GTP_PDP_HASHSIZE, gtp->hash_size)) goto nla_put_failure; if (nla_put_u32(skb, IFLA_GTP_ROLE, gtp->role)) goto nla_put_failure; if (nla_put_u8(skb, IFLA_GTP_RESTART_COUNT, gtp->restart_count)) goto nla_put_failure; return 0; nla_put_failure: return -EMSGSIZE; } static struct rtnl_link_ops gtp_link_ops __read_mostly = { .kind = "gtp", .maxtype = IFLA_GTP_MAX, .policy = gtp_policy, .priv_size = sizeof(struct gtp_dev), .setup = gtp_link_setup, .validate = gtp_validate, .newlink = gtp_newlink, .dellink = gtp_dellink, .get_size = gtp_get_size, .fill_info = gtp_fill_info, }; static int gtp_hashtable_new(struct gtp_dev *gtp, int hsize) { int i; gtp->addr_hash = kmalloc_array(hsize, sizeof(struct hlist_head), GFP_KERNEL | __GFP_NOWARN); if (gtp->addr_hash == NULL) return -ENOMEM; gtp->tid_hash = kmalloc_array(hsize, sizeof(struct hlist_head), GFP_KERNEL | __GFP_NOWARN); if (gtp->tid_hash == NULL) goto err1; gtp->hash_size = hsize; for (i = 0; i < hsize; i++) { INIT_HLIST_HEAD(&gtp->addr_hash[i]); INIT_HLIST_HEAD(&gtp->tid_hash[i]); } return 0; err1: kfree(gtp->addr_hash); return -ENOMEM; } static struct sock *gtp_encap_enable_socket(int fd, int type, struct gtp_dev *gtp) { struct udp_tunnel_sock_cfg tuncfg = {NULL}; struct socket *sock; struct sock *sk; int err; pr_debug("enable gtp on %d, %d\n", fd, type); sock = sockfd_lookup(fd, &err); if (!sock) { pr_debug("gtp socket fd=%d not found\n", fd); return NULL; } sk = sock->sk; if (sk->sk_protocol != IPPROTO_UDP || sk->sk_type != SOCK_DGRAM || (sk->sk_family != AF_INET && sk->sk_family != AF_INET6)) { pr_debug("socket fd=%d not UDP\n", fd); sk = ERR_PTR(-EINVAL); goto out_sock; } lock_sock(sk); if (sk->sk_user_data) { sk = ERR_PTR(-EBUSY); goto out_rel_sock; } sock_hold(sk); tuncfg.sk_user_data = gtp; tuncfg.encap_type = type; tuncfg.encap_rcv = gtp_encap_recv; tuncfg.encap_destroy = gtp_encap_destroy; setup_udp_tunnel_sock(sock_net(sock->sk), sock, &tuncfg); out_rel_sock: release_sock(sock->sk); out_sock: sockfd_put(sock); return sk; } static int gtp_encap_enable(struct gtp_dev *gtp, struct nlattr *data[]) { struct sock *sk1u = NULL; struct sock *sk0 = NULL; if (!data[IFLA_GTP_FD0] && !data[IFLA_GTP_FD1]) return -EINVAL; if (data[IFLA_GTP_FD0]) { u32 fd0 = nla_get_u32(data[IFLA_GTP_FD0]); sk0 = gtp_encap_enable_socket(fd0, UDP_ENCAP_GTP0, gtp); if (IS_ERR(sk0)) return PTR_ERR(sk0); } if (data[IFLA_GTP_FD1]) { u32 fd1 = nla_get_u32(data[IFLA_GTP_FD1]); sk1u = gtp_encap_enable_socket(fd1, UDP_ENCAP_GTP1U, gtp); if (IS_ERR(sk1u)) { gtp_encap_disable_sock(sk0); return PTR_ERR(sk1u); } } gtp->sk0 = sk0; gtp->sk1u = sk1u; return 0; } static struct gtp_dev *gtp_find_dev(struct net *src_net, struct nlattr *nla[]) { struct gtp_dev *gtp = NULL; struct net_device *dev; struct net *net; /* Examine the link attributes and figure out which network namespace * we are talking about. 
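 * GTPA_NET_NS_FD selects an explicit namespace, otherwise the caller's namespace (src_net) is used; the reference taken either way is dropped again via put_net() before returning.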
*/ if (nla[GTPA_NET_NS_FD]) net = get_net_ns_by_fd(nla_get_u32(nla[GTPA_NET_NS_FD])); else net = get_net(src_net); if (IS_ERR(net)) return NULL; /* Check if there's an existing gtpX device to configure */ dev = dev_get_by_index_rcu(net, nla_get_u32(nla[GTPA_LINK])); if (dev && dev->netdev_ops == &gtp_netdev_ops) gtp = netdev_priv(dev); put_net(net); return gtp; } static void ipv4_pdp_fill(struct pdp_ctx *pctx, struct genl_info *info) { pctx->gtp_version = nla_get_u32(info->attrs[GTPA_VERSION]); pctx->af = AF_INET; pctx->peer_addr_ip4.s_addr = nla_get_be32(info->attrs[GTPA_PEER_ADDRESS]); pctx->ms_addr_ip4.s_addr = nla_get_be32(info->attrs[GTPA_MS_ADDRESS]); switch (pctx->gtp_version) { case GTP_V0: /* According to TS 09.60, sections 7.5.1 and 7.5.2, the flow * label needs to be the same for uplink and downlink packets, * so let's annotate this. */ pctx->u.v0.tid = nla_get_u64(info->attrs[GTPA_TID]); pctx->u.v0.flow = nla_get_u16(info->attrs[GTPA_FLOW]); break; case GTP_V1: pctx->u.v1.i_tei = nla_get_u32(info->attrs[GTPA_I_TEI]); pctx->u.v1.o_tei = nla_get_u32(info->attrs[GTPA_O_TEI]); break; default: break; } } static struct pdp_ctx *gtp_pdp_add(struct gtp_dev *gtp, struct sock *sk, struct genl_info *info) { struct pdp_ctx *pctx, *pctx_tid = NULL; struct net_device *dev = gtp->dev; u32 hash_ms, hash_tid = 0; unsigned int version; bool found = false; __be32 ms_addr; ms_addr = nla_get_be32(info->attrs[GTPA_MS_ADDRESS]); hash_ms = ipv4_hashfn(ms_addr) % gtp->hash_size; version = nla_get_u32(info->attrs[GTPA_VERSION]); pctx = ipv4_pdp_find(gtp, ms_addr); if (pctx) found = true; if (version == GTP_V0) pctx_tid = gtp0_pdp_find(gtp, nla_get_u64(info->attrs[GTPA_TID])); else if (version == GTP_V1) pctx_tid = gtp1_pdp_find(gtp, nla_get_u32(info->attrs[GTPA_I_TEI])); if (pctx_tid) found = true; if (found) { if (info->nlhdr->nlmsg_flags & NLM_F_EXCL) return ERR_PTR(-EEXIST); if (info->nlhdr->nlmsg_flags & NLM_F_REPLACE) return ERR_PTR(-EOPNOTSUPP); if (pctx && pctx_tid) return ERR_PTR(-EEXIST); if (!pctx) pctx = pctx_tid; ipv4_pdp_fill(pctx, info); if (pctx->gtp_version == GTP_V0) netdev_dbg(dev, "GTPv0-U: update tunnel id = %llx (pdp %p)\n", pctx->u.v0.tid, pctx); else if (pctx->gtp_version == GTP_V1) netdev_dbg(dev, "GTPv1-U: update tunnel id = %x/%x (pdp %p)\n", pctx->u.v1.i_tei, pctx->u.v1.o_tei, pctx); return pctx; } pctx = kmalloc(sizeof(*pctx), GFP_ATOMIC); if (pctx == NULL) return ERR_PTR(-ENOMEM); sock_hold(sk); pctx->sk = sk; pctx->dev = gtp->dev; ipv4_pdp_fill(pctx, info); atomic_set(&pctx->tx_seq, 0); switch (pctx->gtp_version) { case GTP_V0: /* TS 09.60: "The flow label identifies unambiguously a GTP * flow.". We use the tid for this instead, I cannot find a * situation in which this doesn't unambiguosly identify the * PDP context. 
*/ hash_tid = gtp0_hashfn(pctx->u.v0.tid) % gtp->hash_size; break; case GTP_V1: hash_tid = gtp1u_hashfn(pctx->u.v1.i_tei) % gtp->hash_size; break; } hlist_add_head_rcu(&pctx->hlist_addr, &gtp->addr_hash[hash_ms]); hlist_add_head_rcu(&pctx->hlist_tid, &gtp->tid_hash[hash_tid]); switch (pctx->gtp_version) { case GTP_V0: netdev_dbg(dev, "GTPv0-U: new PDP ctx id=%llx ssgn=%pI4 ms=%pI4 (pdp=%p)\n", pctx->u.v0.tid, &pctx->peer_addr_ip4, &pctx->ms_addr_ip4, pctx); break; case GTP_V1: netdev_dbg(dev, "GTPv1-U: new PDP ctx id=%x/%x ssgn=%pI4 ms=%pI4 (pdp=%p)\n", pctx->u.v1.i_tei, pctx->u.v1.o_tei, &pctx->peer_addr_ip4, &pctx->ms_addr_ip4, pctx); break; } return pctx; } static void pdp_context_free(struct rcu_head *head) { struct pdp_ctx *pctx = container_of(head, struct pdp_ctx, rcu_head); sock_put(pctx->sk); kfree(pctx); } static void pdp_context_delete(struct pdp_ctx *pctx) { hlist_del_rcu(&pctx->hlist_tid); hlist_del_rcu(&pctx->hlist_addr); call_rcu(&pctx->rcu_head, pdp_context_free); } static int gtp_tunnel_notify(struct pdp_ctx *pctx, u8 cmd, gfp_t allocation); static int gtp_genl_new_pdp(struct sk_buff *skb, struct genl_info *info) { unsigned int version; struct pdp_ctx *pctx; struct gtp_dev *gtp; struct sock *sk; int err; if (!info->attrs[GTPA_VERSION] || !info->attrs[GTPA_LINK] || !info->attrs[GTPA_PEER_ADDRESS] || !info->attrs[GTPA_MS_ADDRESS]) return -EINVAL; version = nla_get_u32(info->attrs[GTPA_VERSION]); switch (version) { case GTP_V0: if (!info->attrs[GTPA_TID] || !info->attrs[GTPA_FLOW]) return -EINVAL; break; case GTP_V1: if (!info->attrs[GTPA_I_TEI] || !info->attrs[GTPA_O_TEI]) return -EINVAL; break; default: return -EINVAL; } rtnl_lock(); gtp = gtp_find_dev(sock_net(skb->sk), info->attrs); if (!gtp) { err = -ENODEV; goto out_unlock; } if (version == GTP_V0) sk = gtp->sk0; else if (version == GTP_V1) sk = gtp->sk1u; else sk = NULL; if (!sk) { err = -ENODEV; goto out_unlock; } pctx = gtp_pdp_add(gtp, sk, info); if (IS_ERR(pctx)) { err = PTR_ERR(pctx); } else { gtp_tunnel_notify(pctx, GTP_CMD_NEWPDP, GFP_KERNEL); err = 0; } out_unlock: rtnl_unlock(); return err; } static struct pdp_ctx *gtp_find_pdp_by_link(struct net *net, struct nlattr *nla[]) { struct gtp_dev *gtp; gtp = gtp_find_dev(net, nla); if (!gtp) return ERR_PTR(-ENODEV); if (nla[GTPA_MS_ADDRESS]) { __be32 ip = nla_get_be32(nla[GTPA_MS_ADDRESS]); return ipv4_pdp_find(gtp, ip); } else if (nla[GTPA_VERSION]) { u32 gtp_version = nla_get_u32(nla[GTPA_VERSION]); if (gtp_version == GTP_V0 && nla[GTPA_TID]) return gtp0_pdp_find(gtp, nla_get_u64(nla[GTPA_TID])); else if (gtp_version == GTP_V1 && nla[GTPA_I_TEI]) return gtp1_pdp_find(gtp, nla_get_u32(nla[GTPA_I_TEI])); } return ERR_PTR(-EINVAL); } static struct pdp_ctx *gtp_find_pdp(struct net *net, struct nlattr *nla[]) { struct pdp_ctx *pctx; if (nla[GTPA_LINK]) pctx = gtp_find_pdp_by_link(net, nla); else pctx = ERR_PTR(-EINVAL); if (!pctx) pctx = ERR_PTR(-ENOENT); return pctx; } static int gtp_genl_del_pdp(struct sk_buff *skb, struct genl_info *info) { struct pdp_ctx *pctx; int err = 0; if (!info->attrs[GTPA_VERSION]) return -EINVAL; rcu_read_lock(); pctx = gtp_find_pdp(sock_net(skb->sk), info->attrs); if (IS_ERR(pctx)) { err = PTR_ERR(pctx); goto out_unlock; } if (pctx->gtp_version == GTP_V0) netdev_dbg(pctx->dev, "GTPv0-U: deleting tunnel id = %llx (pdp %p)\n", pctx->u.v0.tid, pctx); else if (pctx->gtp_version == GTP_V1) netdev_dbg(pctx->dev, "GTPv1-U: deleting tunnel id = %x/%x (pdp %p)\n", pctx->u.v1.i_tei, pctx->u.v1.o_tei, pctx); gtp_tunnel_notify(pctx, GTP_CMD_DELPDP, 
GFP_ATOMIC); pdp_context_delete(pctx); out_unlock: rcu_read_unlock(); return err; } static int gtp_genl_fill_info(struct sk_buff *skb, u32 snd_portid, u32 snd_seq, int flags, u32 type, struct pdp_ctx *pctx) { void *genlh; genlh = genlmsg_put(skb, snd_portid, snd_seq, &gtp_genl_family, flags, type); if (genlh == NULL) goto nlmsg_failure; if (nla_put_u32(skb, GTPA_VERSION, pctx->gtp_version) || nla_put_u32(skb, GTPA_LINK, pctx->dev->ifindex) || nla_put_be32(skb, GTPA_PEER_ADDRESS, pctx->peer_addr_ip4.s_addr) || nla_put_be32(skb, GTPA_MS_ADDRESS, pctx->ms_addr_ip4.s_addr)) goto nla_put_failure; switch (pctx->gtp_version) { case GTP_V0: if (nla_put_u64_64bit(skb, GTPA_TID, pctx->u.v0.tid, GTPA_PAD) || nla_put_u16(skb, GTPA_FLOW, pctx->u.v0.flow)) goto nla_put_failure; break; case GTP_V1: if (nla_put_u32(skb, GTPA_I_TEI, pctx->u.v1.i_tei) || nla_put_u32(skb, GTPA_O_TEI, pctx->u.v1.o_tei)) goto nla_put_failure; break; } genlmsg_end(skb, genlh); return 0; nlmsg_failure: nla_put_failure: genlmsg_cancel(skb, genlh); return -EMSGSIZE; } static int gtp_tunnel_notify(struct pdp_ctx *pctx, u8 cmd, gfp_t allocation) { struct sk_buff *msg; int ret; msg = nlmsg_new(NLMSG_DEFAULT_SIZE, allocation); if (!msg) return -ENOMEM; ret = gtp_genl_fill_info(msg, 0, 0, 0, cmd, pctx); if (ret < 0) { nlmsg_free(msg); return ret; } ret = genlmsg_multicast_netns(&gtp_genl_family, dev_net(pctx->dev), msg, 0, GTP_GENL_MCGRP, GFP_ATOMIC); return ret; } static int gtp_genl_get_pdp(struct sk_buff *skb, struct genl_info *info) { struct pdp_ctx *pctx = NULL; struct sk_buff *skb2; int err; if (!info->attrs[GTPA_VERSION]) return -EINVAL; rcu_read_lock(); pctx = gtp_find_pdp(sock_net(skb->sk), info->attrs); if (IS_ERR(pctx)) { err = PTR_ERR(pctx); goto err_unlock; } skb2 = genlmsg_new(NLMSG_GOODSIZE, GFP_ATOMIC); if (skb2 == NULL) { err = -ENOMEM; goto err_unlock; } err = gtp_genl_fill_info(skb2, NETLINK_CB(skb).portid, info->snd_seq, 0, info->nlhdr->nlmsg_type, pctx); if (err < 0) goto err_unlock_free; rcu_read_unlock(); return genlmsg_unicast(genl_info_net(info), skb2, info->snd_portid); err_unlock_free: kfree_skb(skb2); err_unlock: rcu_read_unlock(); return err; } static int gtp_genl_dump_pdp(struct sk_buff *skb, struct netlink_callback *cb) { struct gtp_dev *last_gtp = (struct gtp_dev *)cb->args[2], *gtp; int i, j, bucket = cb->args[0], skip = cb->args[1]; struct net *net = sock_net(skb->sk); struct pdp_ctx *pctx; struct gtp_net *gn; gn = net_generic(net, gtp_net_id); if (cb->args[4]) return 0; rcu_read_lock(); list_for_each_entry_rcu(gtp, &gn->gtp_dev_list, list) { if (last_gtp && last_gtp != gtp) continue; else last_gtp = NULL; for (i = bucket; i < gtp->hash_size; i++) { j = 0; hlist_for_each_entry_rcu(pctx, &gtp->tid_hash[i], hlist_tid) { if (j >= skip && gtp_genl_fill_info(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq, NLM_F_MULTI, cb->nlh->nlmsg_type, pctx)) { cb->args[0] = i; cb->args[1] = j; cb->args[2] = (unsigned long)gtp; goto out; } j++; } skip = 0; } bucket = 0; } cb->args[4] = 1; out: rcu_read_unlock(); return skb->len; } static int gtp_genl_send_echo_req(struct sk_buff *skb, struct genl_info *info) { struct sk_buff *skb_to_send; __be32 src_ip, dst_ip; unsigned int version; struct gtp_dev *gtp; struct flowi4 fl4; struct rtable *rt; struct sock *sk; __be16 port; int len; if (!info->attrs[GTPA_VERSION] || !info->attrs[GTPA_LINK] || !info->attrs[GTPA_PEER_ADDRESS] || !info->attrs[GTPA_MS_ADDRESS]) return -EINVAL; version = nla_get_u32(info->attrs[GTPA_VERSION]); dst_ip = 
nla_get_be32(info->attrs[GTPA_PEER_ADDRESS]); src_ip = nla_get_be32(info->attrs[GTPA_MS_ADDRESS]); gtp = gtp_find_dev(sock_net(skb->sk), info->attrs); if (!gtp) return -ENODEV; if (!gtp->sk_created) return -EOPNOTSUPP; if (!(gtp->dev->flags & IFF_UP)) return -ENETDOWN; if (version == GTP_V0) { struct gtp0_header *gtp0_h; len = LL_RESERVED_SPACE(gtp->dev) + sizeof(struct gtp0_header) + sizeof(struct iphdr) + sizeof(struct udphdr); skb_to_send = netdev_alloc_skb_ip_align(gtp->dev, len); if (!skb_to_send) return -ENOMEM; sk = gtp->sk0; port = htons(GTP0_PORT); gtp0_h = skb_push(skb_to_send, sizeof(struct gtp0_header)); memset(gtp0_h, 0, sizeof(struct gtp0_header)); gtp0_build_echo_msg(gtp0_h, GTP_ECHO_REQ); } else if (version == GTP_V1) { struct gtp1_header_long *gtp1u_h; len = LL_RESERVED_SPACE(gtp->dev) + sizeof(struct gtp1_header_long) + sizeof(struct iphdr) + sizeof(struct udphdr); skb_to_send = netdev_alloc_skb_ip_align(gtp->dev, len); if (!skb_to_send) return -ENOMEM; sk = gtp->sk1u; port = htons(GTP1U_PORT); gtp1u_h = skb_push(skb_to_send, sizeof(struct gtp1_header_long)); memset(gtp1u_h, 0, sizeof(struct gtp1_header_long)); gtp1u_build_echo_msg(gtp1u_h, GTP_ECHO_REQ); } else { return -ENODEV; } rt = ip4_route_output_gtp(&fl4, sk, dst_ip, src_ip); if (IS_ERR(rt)) { netdev_dbg(gtp->dev, "no route for echo request to %pI4\n", &dst_ip); kfree_skb(skb_to_send); return -ENODEV; } udp_tunnel_xmit_skb(rt, sk, skb_to_send, fl4.saddr, fl4.daddr, fl4.flowi4_tos, ip4_dst_hoplimit(&rt->dst), 0, port, port, !net_eq(sock_net(sk), dev_net(gtp->dev)), false); return 0; } static const struct nla_policy gtp_genl_policy[GTPA_MAX + 1] = { [GTPA_LINK] = { .type = NLA_U32, }, [GTPA_VERSION] = { .type = NLA_U32, }, [GTPA_TID] = { .type = NLA_U64, }, [GTPA_PEER_ADDRESS] = { .type = NLA_U32, }, [GTPA_MS_ADDRESS] = { .type = NLA_U32, }, [GTPA_FLOW] = { .type = NLA_U16, }, [GTPA_NET_NS_FD] = { .type = NLA_U32, }, [GTPA_I_TEI] = { .type = NLA_U32, }, [GTPA_O_TEI] = { .type = NLA_U32, }, }; static const struct genl_small_ops gtp_genl_ops[] = { { .cmd = GTP_CMD_NEWPDP, .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, .doit = gtp_genl_new_pdp, .flags = GENL_ADMIN_PERM, }, { .cmd = GTP_CMD_DELPDP, .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, .doit = gtp_genl_del_pdp, .flags = GENL_ADMIN_PERM, }, { .cmd = GTP_CMD_GETPDP, .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, .doit = gtp_genl_get_pdp, .dumpit = gtp_genl_dump_pdp, .flags = GENL_ADMIN_PERM, }, { .cmd = GTP_CMD_ECHOREQ, .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, .doit = gtp_genl_send_echo_req, .flags = GENL_ADMIN_PERM, }, }; static struct genl_family gtp_genl_family __ro_after_init = { .name = "gtp", .version = 0, .hdrsize = 0, .maxattr = GTPA_MAX, .policy = gtp_genl_policy, .netnsok = true, .module = THIS_MODULE, .small_ops = gtp_genl_ops, .n_small_ops = ARRAY_SIZE(gtp_genl_ops), .resv_start_op = GTP_CMD_ECHOREQ + 1, .mcgrps = gtp_genl_mcgrps, .n_mcgrps = ARRAY_SIZE(gtp_genl_mcgrps), }; static int __net_init gtp_net_init(struct net *net) { struct gtp_net *gn = net_generic(net, gtp_net_id); INIT_LIST_HEAD(&gn->gtp_dev_list); return 0; } static void __net_exit gtp_net_exit(struct net *net) { struct gtp_net *gn = net_generic(net, gtp_net_id); struct gtp_dev *gtp; LIST_HEAD(list); rtnl_lock(); list_for_each_entry(gtp, &gn->gtp_dev_list, list) gtp_dellink(gtp->dev, &list); unregister_netdevice_many(&list); rtnl_unlock(); } static struct pernet_operations gtp_net_ops = { .init = gtp_net_init, 
.exit = gtp_net_exit, .id = &gtp_net_id, .size = sizeof(struct gtp_net), }; static int __init gtp_init(void) { int err; get_random_bytes(&gtp_h_initval, sizeof(gtp_h_initval)); err = rtnl_link_register(&gtp_link_ops); if (err < 0) goto error_out; err = genl_register_family(&gtp_genl_family); if (err < 0) goto unreg_rtnl_link; err = register_pernet_subsys(&gtp_net_ops); if (err < 0) goto unreg_genl_family; pr_info("GTP module loaded (pdp ctx size %zd bytes)\n", sizeof(struct pdp_ctx)); return 0; unreg_genl_family: genl_unregister_family(&gtp_genl_family); unreg_rtnl_link: rtnl_link_unregister(&gtp_link_ops); error_out: pr_err("error loading GTP module\n"); return err; } late_initcall(gtp_init); static void __exit gtp_fini(void) { genl_unregister_family(&gtp_genl_family); rtnl_link_unregister(&gtp_link_ops); unregister_pernet_subsys(&gtp_net_ops); pr_info("GTP module unloaded\n"); } module_exit(gtp_fini); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Harald Welte <[email protected]>"); MODULE_DESCRIPTION("Interface driver for GTP encapsulated traffic"); MODULE_ALIAS_RTNL_LINK("gtp"); MODULE_ALIAS_GENL_FAMILY("gtp");
linux-master
drivers/net/gtp.c
// SPDX-License-Identifier: GPL-2.0-only /* * drivers/net/veth.c * * Copyright (C) 2007 OpenVZ http://openvz.org, SWsoft Inc * * Author: Pavel Emelianov <[email protected]> * Ethtool interface from: Eric W. Biederman <[email protected]> * */ #include <linux/netdevice.h> #include <linux/slab.h> #include <linux/ethtool.h> #include <linux/etherdevice.h> #include <linux/u64_stats_sync.h> #include <net/rtnetlink.h> #include <net/dst.h> #include <net/xfrm.h> #include <net/xdp.h> #include <linux/veth.h> #include <linux/module.h> #include <linux/bpf.h> #include <linux/filter.h> #include <linux/ptr_ring.h> #include <linux/bpf_trace.h> #include <linux/net_tstamp.h> #include <net/page_pool/helpers.h> #define DRV_NAME "veth" #define DRV_VERSION "1.0" #define VETH_XDP_FLAG BIT(0) #define VETH_RING_SIZE 256 #define VETH_XDP_HEADROOM (XDP_PACKET_HEADROOM + NET_IP_ALIGN) #define VETH_XDP_TX_BULK_SIZE 16 #define VETH_XDP_BATCH 16 struct veth_stats { u64 rx_drops; /* xdp */ u64 xdp_packets; u64 xdp_bytes; u64 xdp_redirect; u64 xdp_drops; u64 xdp_tx; u64 xdp_tx_err; u64 peer_tq_xdp_xmit; u64 peer_tq_xdp_xmit_err; }; struct veth_rq_stats { struct veth_stats vs; struct u64_stats_sync syncp; }; struct veth_rq { struct napi_struct xdp_napi; struct napi_struct __rcu *napi; /* points to xdp_napi when the latter is initialized */ struct net_device *dev; struct bpf_prog __rcu *xdp_prog; struct xdp_mem_info xdp_mem; struct veth_rq_stats stats; bool rx_notify_masked; struct ptr_ring xdp_ring; struct xdp_rxq_info xdp_rxq; struct page_pool *page_pool; }; struct veth_priv { struct net_device __rcu *peer; atomic64_t dropped; struct bpf_prog *_xdp_prog; struct veth_rq *rq; unsigned int requested_headroom; }; struct veth_xdp_tx_bq { struct xdp_frame *q[VETH_XDP_TX_BULK_SIZE]; unsigned int count; }; /* * ethtool interface */ struct veth_q_stat_desc { char desc[ETH_GSTRING_LEN]; size_t offset; }; #define VETH_RQ_STAT(m) offsetof(struct veth_stats, m) static const struct veth_q_stat_desc veth_rq_stats_desc[] = { { "xdp_packets", VETH_RQ_STAT(xdp_packets) }, { "xdp_bytes", VETH_RQ_STAT(xdp_bytes) }, { "drops", VETH_RQ_STAT(rx_drops) }, { "xdp_redirect", VETH_RQ_STAT(xdp_redirect) }, { "xdp_drops", VETH_RQ_STAT(xdp_drops) }, { "xdp_tx", VETH_RQ_STAT(xdp_tx) }, { "xdp_tx_errors", VETH_RQ_STAT(xdp_tx_err) }, }; #define VETH_RQ_STATS_LEN ARRAY_SIZE(veth_rq_stats_desc) static const struct veth_q_stat_desc veth_tq_stats_desc[] = { { "xdp_xmit", VETH_RQ_STAT(peer_tq_xdp_xmit) }, { "xdp_xmit_errors", VETH_RQ_STAT(peer_tq_xdp_xmit_err) }, }; #define VETH_TQ_STATS_LEN ARRAY_SIZE(veth_tq_stats_desc) static struct { const char string[ETH_GSTRING_LEN]; } ethtool_stats_keys[] = { { "peer_ifindex" }, }; struct veth_xdp_buff { struct xdp_buff xdp; struct sk_buff *skb; }; static int veth_get_link_ksettings(struct net_device *dev, struct ethtool_link_ksettings *cmd) { cmd->base.speed = SPEED_10000; cmd->base.duplex = DUPLEX_FULL; cmd->base.port = PORT_TP; cmd->base.autoneg = AUTONEG_DISABLE; return 0; } static void veth_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) { strscpy(info->driver, DRV_NAME, sizeof(info->driver)); strscpy(info->version, DRV_VERSION, sizeof(info->version)); } static void veth_get_strings(struct net_device *dev, u32 stringset, u8 *buf) { u8 *p = buf; int i, j; switch(stringset) { case ETH_SS_STATS: memcpy(p, &ethtool_stats_keys, sizeof(ethtool_stats_keys)); p += sizeof(ethtool_stats_keys); for (i = 0; i < dev->real_num_rx_queues; i++) for (j = 0; j < VETH_RQ_STATS_LEN; j++) ethtool_sprintf(&p, 
"rx_queue_%u_%.18s", i, veth_rq_stats_desc[j].desc); for (i = 0; i < dev->real_num_tx_queues; i++) for (j = 0; j < VETH_TQ_STATS_LEN; j++) ethtool_sprintf(&p, "tx_queue_%u_%.18s", i, veth_tq_stats_desc[j].desc); page_pool_ethtool_stats_get_strings(p); break; } } static int veth_get_sset_count(struct net_device *dev, int sset) { switch (sset) { case ETH_SS_STATS: return ARRAY_SIZE(ethtool_stats_keys) + VETH_RQ_STATS_LEN * dev->real_num_rx_queues + VETH_TQ_STATS_LEN * dev->real_num_tx_queues + page_pool_ethtool_stats_get_count(); default: return -EOPNOTSUPP; } } static void veth_get_page_pool_stats(struct net_device *dev, u64 *data) { #ifdef CONFIG_PAGE_POOL_STATS struct veth_priv *priv = netdev_priv(dev); struct page_pool_stats pp_stats = {}; int i; for (i = 0; i < dev->real_num_rx_queues; i++) { if (!priv->rq[i].page_pool) continue; page_pool_get_stats(priv->rq[i].page_pool, &pp_stats); } page_pool_ethtool_stats_get(data, &pp_stats); #endif /* CONFIG_PAGE_POOL_STATS */ } static void veth_get_ethtool_stats(struct net_device *dev, struct ethtool_stats *stats, u64 *data) { struct veth_priv *rcv_priv, *priv = netdev_priv(dev); struct net_device *peer = rtnl_dereference(priv->peer); int i, j, idx, pp_idx; data[0] = peer ? peer->ifindex : 0; idx = 1; for (i = 0; i < dev->real_num_rx_queues; i++) { const struct veth_rq_stats *rq_stats = &priv->rq[i].stats; const void *stats_base = (void *)&rq_stats->vs; unsigned int start; size_t offset; do { start = u64_stats_fetch_begin(&rq_stats->syncp); for (j = 0; j < VETH_RQ_STATS_LEN; j++) { offset = veth_rq_stats_desc[j].offset; data[idx + j] = *(u64 *)(stats_base + offset); } } while (u64_stats_fetch_retry(&rq_stats->syncp, start)); idx += VETH_RQ_STATS_LEN; } pp_idx = idx; if (!peer) goto page_pool_stats; rcv_priv = netdev_priv(peer); for (i = 0; i < peer->real_num_rx_queues; i++) { const struct veth_rq_stats *rq_stats = &rcv_priv->rq[i].stats; const void *base = (void *)&rq_stats->vs; unsigned int start, tx_idx = idx; size_t offset; tx_idx += (i % dev->real_num_tx_queues) * VETH_TQ_STATS_LEN; do { start = u64_stats_fetch_begin(&rq_stats->syncp); for (j = 0; j < VETH_TQ_STATS_LEN; j++) { offset = veth_tq_stats_desc[j].offset; data[tx_idx + j] += *(u64 *)(base + offset); } } while (u64_stats_fetch_retry(&rq_stats->syncp, start)); pp_idx = tx_idx + VETH_TQ_STATS_LEN; } page_pool_stats: veth_get_page_pool_stats(dev, &data[pp_idx]); } static void veth_get_channels(struct net_device *dev, struct ethtool_channels *channels) { channels->tx_count = dev->real_num_tx_queues; channels->rx_count = dev->real_num_rx_queues; channels->max_tx = dev->num_tx_queues; channels->max_rx = dev->num_rx_queues; } static int veth_set_channels(struct net_device *dev, struct ethtool_channels *ch); static const struct ethtool_ops veth_ethtool_ops = { .get_drvinfo = veth_get_drvinfo, .get_link = ethtool_op_get_link, .get_strings = veth_get_strings, .get_sset_count = veth_get_sset_count, .get_ethtool_stats = veth_get_ethtool_stats, .get_link_ksettings = veth_get_link_ksettings, .get_ts_info = ethtool_op_get_ts_info, .get_channels = veth_get_channels, .set_channels = veth_set_channels, }; /* general routines */ static bool veth_is_xdp_frame(void *ptr) { return (unsigned long)ptr & VETH_XDP_FLAG; } static struct xdp_frame *veth_ptr_to_xdp(void *ptr) { return (void *)((unsigned long)ptr & ~VETH_XDP_FLAG); } static void *veth_xdp_to_ptr(struct xdp_frame *xdp) { return (void *)((unsigned long)xdp | VETH_XDP_FLAG); } static void veth_ptr_free(void *ptr) { if (veth_is_xdp_frame(ptr)) 
xdp_return_frame(veth_ptr_to_xdp(ptr)); else kfree_skb(ptr); } static void __veth_xdp_flush(struct veth_rq *rq) { /* Write ptr_ring before reading rx_notify_masked */ smp_mb(); if (!READ_ONCE(rq->rx_notify_masked) && napi_schedule_prep(&rq->xdp_napi)) { WRITE_ONCE(rq->rx_notify_masked, true); __napi_schedule(&rq->xdp_napi); } } static int veth_xdp_rx(struct veth_rq *rq, struct sk_buff *skb) { if (unlikely(ptr_ring_produce(&rq->xdp_ring, skb))) { dev_kfree_skb_any(skb); return NET_RX_DROP; } return NET_RX_SUCCESS; } static int veth_forward_skb(struct net_device *dev, struct sk_buff *skb, struct veth_rq *rq, bool xdp) { return __dev_forward_skb(dev, skb) ?: xdp ? veth_xdp_rx(rq, skb) : __netif_rx(skb); } /* return true if the specified skb has chances of GRO aggregation * Don't strive for accuracy, but try to avoid GRO overhead in the most * common scenarios. * When XDP is enabled, all traffic is considered eligible, as the xmit * device has TSO off. * When TSO is enabled on the xmit device, we are likely interested only * in UDP aggregation, explicitly check for that if the skb is suspected * - the sock_wfree destructor is used by UDP, ICMP and XDP sockets - * to belong to locally generated UDP traffic. */ static bool veth_skb_is_eligible_for_gro(const struct net_device *dev, const struct net_device *rcv, const struct sk_buff *skb) { return !(dev->features & NETIF_F_ALL_TSO) || (skb->destructor == sock_wfree && rcv->features & (NETIF_F_GRO_FRAGLIST | NETIF_F_GRO_UDP_FWD)); } static netdev_tx_t veth_xmit(struct sk_buff *skb, struct net_device *dev) { struct veth_priv *rcv_priv, *priv = netdev_priv(dev); struct veth_rq *rq = NULL; int ret = NETDEV_TX_OK; struct net_device *rcv; int length = skb->len; bool use_napi = false; int rxq; rcu_read_lock(); rcv = rcu_dereference(priv->peer); if (unlikely(!rcv) || !pskb_may_pull(skb, ETH_HLEN)) { kfree_skb(skb); goto drop; } rcv_priv = netdev_priv(rcv); rxq = skb_get_queue_mapping(skb); if (rxq < rcv->real_num_rx_queues) { rq = &rcv_priv->rq[rxq]; /* The napi pointer is available when an XDP program is * attached or when GRO is enabled * Don't bother with napi/GRO if the skb can't be aggregated */ use_napi = rcu_access_pointer(rq->napi) && veth_skb_is_eligible_for_gro(dev, rcv, skb); } skb_tx_timestamp(skb); if (likely(veth_forward_skb(rcv, skb, rq, use_napi) == NET_RX_SUCCESS)) { if (!use_napi) dev_lstats_add(dev, length); else __veth_xdp_flush(rq); } else { drop: atomic64_inc(&priv->dropped); ret = NET_XMIT_DROP; } rcu_read_unlock(); return ret; } static u64 veth_stats_tx(struct net_device *dev, u64 *packets, u64 *bytes) { struct veth_priv *priv = netdev_priv(dev); dev_lstats_read(dev, packets, bytes); return atomic64_read(&priv->dropped); } static void veth_stats_rx(struct veth_stats *result, struct net_device *dev) { struct veth_priv *priv = netdev_priv(dev); int i; result->peer_tq_xdp_xmit_err = 0; result->xdp_packets = 0; result->xdp_tx_err = 0; result->xdp_bytes = 0; result->rx_drops = 0; for (i = 0; i < dev->num_rx_queues; i++) { u64 packets, bytes, drops, xdp_tx_err, peer_tq_xdp_xmit_err; struct veth_rq_stats *stats = &priv->rq[i].stats; unsigned int start; do { start = u64_stats_fetch_begin(&stats->syncp); peer_tq_xdp_xmit_err = stats->vs.peer_tq_xdp_xmit_err; xdp_tx_err = stats->vs.xdp_tx_err; packets = stats->vs.xdp_packets; bytes = stats->vs.xdp_bytes; drops = stats->vs.rx_drops; } while (u64_stats_fetch_retry(&stats->syncp, start)); result->peer_tq_xdp_xmit_err += peer_tq_xdp_xmit_err; result->xdp_tx_err += xdp_tx_err; result->xdp_packets 
+= packets; result->xdp_bytes += bytes; result->rx_drops += drops; } } static void veth_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *tot) { struct veth_priv *priv = netdev_priv(dev); struct net_device *peer; struct veth_stats rx; u64 packets, bytes; tot->tx_dropped = veth_stats_tx(dev, &packets, &bytes); tot->tx_bytes = bytes; tot->tx_packets = packets; veth_stats_rx(&rx, dev); tot->tx_dropped += rx.xdp_tx_err; tot->rx_dropped = rx.rx_drops + rx.peer_tq_xdp_xmit_err; tot->rx_bytes = rx.xdp_bytes; tot->rx_packets = rx.xdp_packets; rcu_read_lock(); peer = rcu_dereference(priv->peer); if (peer) { veth_stats_tx(peer, &packets, &bytes); tot->rx_bytes += bytes; tot->rx_packets += packets; veth_stats_rx(&rx, peer); tot->tx_dropped += rx.peer_tq_xdp_xmit_err; tot->rx_dropped += rx.xdp_tx_err; tot->tx_bytes += rx.xdp_bytes; tot->tx_packets += rx.xdp_packets; } rcu_read_unlock(); } /* fake multicast ability */ static void veth_set_multicast_list(struct net_device *dev) { } static int veth_select_rxq(struct net_device *dev) { return smp_processor_id() % dev->real_num_rx_queues; } static struct net_device *veth_peer_dev(struct net_device *dev) { struct veth_priv *priv = netdev_priv(dev); /* Callers must be under RCU read side. */ return rcu_dereference(priv->peer); } static int veth_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames, u32 flags, bool ndo_xmit) { struct veth_priv *rcv_priv, *priv = netdev_priv(dev); int i, ret = -ENXIO, nxmit = 0; struct net_device *rcv; unsigned int max_len; struct veth_rq *rq; if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK)) return -EINVAL; rcu_read_lock(); rcv = rcu_dereference(priv->peer); if (unlikely(!rcv)) goto out; rcv_priv = netdev_priv(rcv); rq = &rcv_priv->rq[veth_select_rxq(rcv)]; /* The napi pointer is set if NAPI is enabled, which ensures that * xdp_ring is initialized on receive side and the peer device is up. 
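 * Frames larger than the peer's MTU budget or that no longer fit in the ring terminate the bulk produce loop below; only the number actually queued is returned so the caller can account for the remainder.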
*/ if (!rcu_access_pointer(rq->napi)) goto out; max_len = rcv->mtu + rcv->hard_header_len + VLAN_HLEN; spin_lock(&rq->xdp_ring.producer_lock); for (i = 0; i < n; i++) { struct xdp_frame *frame = frames[i]; void *ptr = veth_xdp_to_ptr(frame); if (unlikely(xdp_get_frame_len(frame) > max_len || __ptr_ring_produce(&rq->xdp_ring, ptr))) break; nxmit++; } spin_unlock(&rq->xdp_ring.producer_lock); if (flags & XDP_XMIT_FLUSH) __veth_xdp_flush(rq); ret = nxmit; if (ndo_xmit) { u64_stats_update_begin(&rq->stats.syncp); rq->stats.vs.peer_tq_xdp_xmit += nxmit; rq->stats.vs.peer_tq_xdp_xmit_err += n - nxmit; u64_stats_update_end(&rq->stats.syncp); } out: rcu_read_unlock(); return ret; } static int veth_ndo_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames, u32 flags) { int err; err = veth_xdp_xmit(dev, n, frames, flags, true); if (err < 0) { struct veth_priv *priv = netdev_priv(dev); atomic64_add(n, &priv->dropped); } return err; } static void veth_xdp_flush_bq(struct veth_rq *rq, struct veth_xdp_tx_bq *bq) { int sent, i, err = 0, drops; sent = veth_xdp_xmit(rq->dev, bq->count, bq->q, 0, false); if (sent < 0) { err = sent; sent = 0; } for (i = sent; unlikely(i < bq->count); i++) xdp_return_frame(bq->q[i]); drops = bq->count - sent; trace_xdp_bulk_tx(rq->dev, sent, drops, err); u64_stats_update_begin(&rq->stats.syncp); rq->stats.vs.xdp_tx += sent; rq->stats.vs.xdp_tx_err += drops; u64_stats_update_end(&rq->stats.syncp); bq->count = 0; } static void veth_xdp_flush(struct veth_rq *rq, struct veth_xdp_tx_bq *bq) { struct veth_priv *rcv_priv, *priv = netdev_priv(rq->dev); struct net_device *rcv; struct veth_rq *rcv_rq; rcu_read_lock(); veth_xdp_flush_bq(rq, bq); rcv = rcu_dereference(priv->peer); if (unlikely(!rcv)) goto out; rcv_priv = netdev_priv(rcv); rcv_rq = &rcv_priv->rq[veth_select_rxq(rcv)]; /* xdp_ring is initialized on receive side? 
*/ if (unlikely(!rcu_access_pointer(rcv_rq->xdp_prog))) goto out; __veth_xdp_flush(rcv_rq); out: rcu_read_unlock(); } static int veth_xdp_tx(struct veth_rq *rq, struct xdp_buff *xdp, struct veth_xdp_tx_bq *bq) { struct xdp_frame *frame = xdp_convert_buff_to_frame(xdp); if (unlikely(!frame)) return -EOVERFLOW; if (unlikely(bq->count == VETH_XDP_TX_BULK_SIZE)) veth_xdp_flush_bq(rq, bq); bq->q[bq->count++] = frame; return 0; } static struct xdp_frame *veth_xdp_rcv_one(struct veth_rq *rq, struct xdp_frame *frame, struct veth_xdp_tx_bq *bq, struct veth_stats *stats) { struct xdp_frame orig_frame; struct bpf_prog *xdp_prog; rcu_read_lock(); xdp_prog = rcu_dereference(rq->xdp_prog); if (likely(xdp_prog)) { struct veth_xdp_buff vxbuf; struct xdp_buff *xdp = &vxbuf.xdp; u32 act; xdp_convert_frame_to_buff(frame, xdp); xdp->rxq = &rq->xdp_rxq; vxbuf.skb = NULL; act = bpf_prog_run_xdp(xdp_prog, xdp); switch (act) { case XDP_PASS: if (xdp_update_frame_from_buff(xdp, frame)) goto err_xdp; break; case XDP_TX: orig_frame = *frame; xdp->rxq->mem = frame->mem; if (unlikely(veth_xdp_tx(rq, xdp, bq) < 0)) { trace_xdp_exception(rq->dev, xdp_prog, act); frame = &orig_frame; stats->rx_drops++; goto err_xdp; } stats->xdp_tx++; rcu_read_unlock(); goto xdp_xmit; case XDP_REDIRECT: orig_frame = *frame; xdp->rxq->mem = frame->mem; if (xdp_do_redirect(rq->dev, xdp, xdp_prog)) { frame = &orig_frame; stats->rx_drops++; goto err_xdp; } stats->xdp_redirect++; rcu_read_unlock(); goto xdp_xmit; default: bpf_warn_invalid_xdp_action(rq->dev, xdp_prog, act); fallthrough; case XDP_ABORTED: trace_xdp_exception(rq->dev, xdp_prog, act); fallthrough; case XDP_DROP: stats->xdp_drops++; goto err_xdp; } } rcu_read_unlock(); return frame; err_xdp: rcu_read_unlock(); xdp_return_frame(frame); xdp_xmit: return NULL; } /* frames array contains VETH_XDP_BATCH at most */ static void veth_xdp_rcv_bulk_skb(struct veth_rq *rq, void **frames, int n_xdpf, struct veth_xdp_tx_bq *bq, struct veth_stats *stats) { void *skbs[VETH_XDP_BATCH]; int i; if (xdp_alloc_skb_bulk(skbs, n_xdpf, GFP_ATOMIC | __GFP_ZERO) < 0) { for (i = 0; i < n_xdpf; i++) xdp_return_frame(frames[i]); stats->rx_drops += n_xdpf; return; } for (i = 0; i < n_xdpf; i++) { struct sk_buff *skb = skbs[i]; skb = __xdp_build_skb_from_frame(frames[i], skb, rq->dev); if (!skb) { xdp_return_frame(frames[i]); stats->rx_drops++; continue; } napi_gro_receive(&rq->xdp_napi, skb); } } static void veth_xdp_get(struct xdp_buff *xdp) { struct skb_shared_info *sinfo = xdp_get_shared_info_from_buff(xdp); int i; get_page(virt_to_page(xdp->data)); if (likely(!xdp_buff_has_frags(xdp))) return; for (i = 0; i < sinfo->nr_frags; i++) __skb_frag_ref(&sinfo->frags[i]); } static int veth_convert_skb_to_xdp_buff(struct veth_rq *rq, struct xdp_buff *xdp, struct sk_buff **pskb) { struct sk_buff *skb = *pskb; u32 frame_sz; if (skb_shared(skb) || skb_head_is_locked(skb) || skb_shinfo(skb)->nr_frags || skb_headroom(skb) < XDP_PACKET_HEADROOM) { u32 size, len, max_head_size, off; struct sk_buff *nskb; struct page *page; int i, head_off; /* We need a private copy of the skb and data buffers since * the ebpf program can modify it. We segment the original skb * into order-0 pages without linearize it. 
* * Make sure we have enough space for linear and paged area */ max_head_size = SKB_WITH_OVERHEAD(PAGE_SIZE - VETH_XDP_HEADROOM); if (skb->len > PAGE_SIZE * MAX_SKB_FRAGS + max_head_size) goto drop; /* Allocate skb head */ page = page_pool_dev_alloc_pages(rq->page_pool); if (!page) goto drop; nskb = napi_build_skb(page_address(page), PAGE_SIZE); if (!nskb) { page_pool_put_full_page(rq->page_pool, page, true); goto drop; } skb_reserve(nskb, VETH_XDP_HEADROOM); skb_copy_header(nskb, skb); skb_mark_for_recycle(nskb); size = min_t(u32, skb->len, max_head_size); if (skb_copy_bits(skb, 0, nskb->data, size)) { consume_skb(nskb); goto drop; } skb_put(nskb, size); head_off = skb_headroom(nskb) - skb_headroom(skb); skb_headers_offset_update(nskb, head_off); /* Allocate paged area of new skb */ off = size; len = skb->len - off; for (i = 0; i < MAX_SKB_FRAGS && off < skb->len; i++) { page = page_pool_dev_alloc_pages(rq->page_pool); if (!page) { consume_skb(nskb); goto drop; } size = min_t(u32, len, PAGE_SIZE); skb_add_rx_frag(nskb, i, page, 0, size, PAGE_SIZE); if (skb_copy_bits(skb, off, page_address(page), size)) { consume_skb(nskb); goto drop; } len -= size; off += size; } consume_skb(skb); skb = nskb; } /* SKB "head" area always have tailroom for skb_shared_info */ frame_sz = skb_end_pointer(skb) - skb->head; frame_sz += SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); xdp_init_buff(xdp, frame_sz, &rq->xdp_rxq); xdp_prepare_buff(xdp, skb->head, skb_headroom(skb), skb_headlen(skb), true); if (skb_is_nonlinear(skb)) { skb_shinfo(skb)->xdp_frags_size = skb->data_len; xdp_buff_set_frags_flag(xdp); } else { xdp_buff_clear_frags_flag(xdp); } *pskb = skb; return 0; drop: consume_skb(skb); *pskb = NULL; return -ENOMEM; } static struct sk_buff *veth_xdp_rcv_skb(struct veth_rq *rq, struct sk_buff *skb, struct veth_xdp_tx_bq *bq, struct veth_stats *stats) { void *orig_data, *orig_data_end; struct bpf_prog *xdp_prog; struct veth_xdp_buff vxbuf; struct xdp_buff *xdp = &vxbuf.xdp; u32 act, metalen; int off; skb_prepare_for_gro(skb); rcu_read_lock(); xdp_prog = rcu_dereference(rq->xdp_prog); if (unlikely(!xdp_prog)) { rcu_read_unlock(); goto out; } __skb_push(skb, skb->data - skb_mac_header(skb)); if (veth_convert_skb_to_xdp_buff(rq, xdp, &skb)) goto drop; vxbuf.skb = skb; orig_data = xdp->data; orig_data_end = xdp->data_end; act = bpf_prog_run_xdp(xdp_prog, xdp); switch (act) { case XDP_PASS: break; case XDP_TX: veth_xdp_get(xdp); consume_skb(skb); xdp->rxq->mem = rq->xdp_mem; if (unlikely(veth_xdp_tx(rq, xdp, bq) < 0)) { trace_xdp_exception(rq->dev, xdp_prog, act); stats->rx_drops++; goto err_xdp; } stats->xdp_tx++; rcu_read_unlock(); goto xdp_xmit; case XDP_REDIRECT: veth_xdp_get(xdp); consume_skb(skb); xdp->rxq->mem = rq->xdp_mem; if (xdp_do_redirect(rq->dev, xdp, xdp_prog)) { stats->rx_drops++; goto err_xdp; } stats->xdp_redirect++; rcu_read_unlock(); goto xdp_xmit; default: bpf_warn_invalid_xdp_action(rq->dev, xdp_prog, act); fallthrough; case XDP_ABORTED: trace_xdp_exception(rq->dev, xdp_prog, act); fallthrough; case XDP_DROP: stats->xdp_drops++; goto xdp_drop; } rcu_read_unlock(); /* check if bpf_xdp_adjust_head was used */ off = orig_data - xdp->data; if (off > 0) __skb_push(skb, off); else if (off < 0) __skb_pull(skb, -off); skb_reset_mac_header(skb); /* check if bpf_xdp_adjust_tail was used */ off = xdp->data_end - orig_data_end; if (off != 0) __skb_put(skb, off); /* positive on grow, negative on shrink */ /* XDP frag metadata (e.g. nr_frags) are updated in eBPF helpers * (e.g. 
bpf_xdp_adjust_tail), we need to update data_len here. */ if (xdp_buff_has_frags(xdp)) skb->data_len = skb_shinfo(skb)->xdp_frags_size; else skb->data_len = 0; skb->protocol = eth_type_trans(skb, rq->dev); metalen = xdp->data - xdp->data_meta; if (metalen) skb_metadata_set(skb, metalen); out: return skb; drop: stats->rx_drops++; xdp_drop: rcu_read_unlock(); kfree_skb(skb); return NULL; err_xdp: rcu_read_unlock(); xdp_return_buff(xdp); xdp_xmit: return NULL; } static int veth_xdp_rcv(struct veth_rq *rq, int budget, struct veth_xdp_tx_bq *bq, struct veth_stats *stats) { int i, done = 0, n_xdpf = 0; void *xdpf[VETH_XDP_BATCH]; for (i = 0; i < budget; i++) { void *ptr = __ptr_ring_consume(&rq->xdp_ring); if (!ptr) break; if (veth_is_xdp_frame(ptr)) { /* ndo_xdp_xmit */ struct xdp_frame *frame = veth_ptr_to_xdp(ptr); stats->xdp_bytes += xdp_get_frame_len(frame); frame = veth_xdp_rcv_one(rq, frame, bq, stats); if (frame) { /* XDP_PASS */ xdpf[n_xdpf++] = frame; if (n_xdpf == VETH_XDP_BATCH) { veth_xdp_rcv_bulk_skb(rq, xdpf, n_xdpf, bq, stats); n_xdpf = 0; } } } else { /* ndo_start_xmit */ struct sk_buff *skb = ptr; stats->xdp_bytes += skb->len; skb = veth_xdp_rcv_skb(rq, skb, bq, stats); if (skb) { if (skb_shared(skb) || skb_unclone(skb, GFP_ATOMIC)) netif_receive_skb(skb); else napi_gro_receive(&rq->xdp_napi, skb); } } done++; } if (n_xdpf) veth_xdp_rcv_bulk_skb(rq, xdpf, n_xdpf, bq, stats); u64_stats_update_begin(&rq->stats.syncp); rq->stats.vs.xdp_redirect += stats->xdp_redirect; rq->stats.vs.xdp_bytes += stats->xdp_bytes; rq->stats.vs.xdp_drops += stats->xdp_drops; rq->stats.vs.rx_drops += stats->rx_drops; rq->stats.vs.xdp_packets += done; u64_stats_update_end(&rq->stats.syncp); return done; } static int veth_poll(struct napi_struct *napi, int budget) { struct veth_rq *rq = container_of(napi, struct veth_rq, xdp_napi); struct veth_stats stats = {}; struct veth_xdp_tx_bq bq; int done; bq.count = 0; xdp_set_return_frame_no_direct(); done = veth_xdp_rcv(rq, budget, &bq, &stats); if (stats.xdp_redirect > 0) xdp_do_flush(); if (done < budget && napi_complete_done(napi, done)) { /* Write rx_notify_masked before reading ptr_ring */ smp_store_mb(rq->rx_notify_masked, false); if (unlikely(!__ptr_ring_empty(&rq->xdp_ring))) { if (napi_schedule_prep(&rq->xdp_napi)) { WRITE_ONCE(rq->rx_notify_masked, true); __napi_schedule(&rq->xdp_napi); } } } if (stats.xdp_tx > 0) veth_xdp_flush(rq, &bq); xdp_clear_return_frame_no_direct(); return done; } static int veth_create_page_pool(struct veth_rq *rq) { struct page_pool_params pp_params = { .order = 0, .pool_size = VETH_RING_SIZE, .nid = NUMA_NO_NODE, .dev = &rq->dev->dev, }; rq->page_pool = page_pool_create(&pp_params); if (IS_ERR(rq->page_pool)) { int err = PTR_ERR(rq->page_pool); rq->page_pool = NULL; return err; } return 0; } static int __veth_napi_enable_range(struct net_device *dev, int start, int end) { struct veth_priv *priv = netdev_priv(dev); int err, i; for (i = start; i < end; i++) { err = veth_create_page_pool(&priv->rq[i]); if (err) goto err_page_pool; } for (i = start; i < end; i++) { struct veth_rq *rq = &priv->rq[i]; err = ptr_ring_init(&rq->xdp_ring, VETH_RING_SIZE, GFP_KERNEL); if (err) goto err_xdp_ring; } for (i = start; i < end; i++) { struct veth_rq *rq = &priv->rq[i]; napi_enable(&rq->xdp_napi); rcu_assign_pointer(priv->rq[i].napi, &priv->rq[i].xdp_napi); } return 0; err_xdp_ring: for (i--; i >= start; i--) ptr_ring_cleanup(&priv->rq[i].xdp_ring, veth_ptr_free); i = end; err_page_pool: for (i--; i >= start; i--) { 
page_pool_destroy(priv->rq[i].page_pool); priv->rq[i].page_pool = NULL; } return err; } static int __veth_napi_enable(struct net_device *dev) { return __veth_napi_enable_range(dev, 0, dev->real_num_rx_queues); } static void veth_napi_del_range(struct net_device *dev, int start, int end) { struct veth_priv *priv = netdev_priv(dev); int i; for (i = start; i < end; i++) { struct veth_rq *rq = &priv->rq[i]; rcu_assign_pointer(priv->rq[i].napi, NULL); napi_disable(&rq->xdp_napi); __netif_napi_del(&rq->xdp_napi); } synchronize_net(); for (i = start; i < end; i++) { struct veth_rq *rq = &priv->rq[i]; rq->rx_notify_masked = false; ptr_ring_cleanup(&rq->xdp_ring, veth_ptr_free); } for (i = start; i < end; i++) { page_pool_destroy(priv->rq[i].page_pool); priv->rq[i].page_pool = NULL; } } static void veth_napi_del(struct net_device *dev) { veth_napi_del_range(dev, 0, dev->real_num_rx_queues); } static bool veth_gro_requested(const struct net_device *dev) { return !!(dev->wanted_features & NETIF_F_GRO); } static int veth_enable_xdp_range(struct net_device *dev, int start, int end, bool napi_already_on) { struct veth_priv *priv = netdev_priv(dev); int err, i; for (i = start; i < end; i++) { struct veth_rq *rq = &priv->rq[i]; if (!napi_already_on) netif_napi_add(dev, &rq->xdp_napi, veth_poll); err = xdp_rxq_info_reg(&rq->xdp_rxq, dev, i, rq->xdp_napi.napi_id); if (err < 0) goto err_rxq_reg; err = xdp_rxq_info_reg_mem_model(&rq->xdp_rxq, MEM_TYPE_PAGE_SHARED, NULL); if (err < 0) goto err_reg_mem; /* Save original mem info as it can be overwritten */ rq->xdp_mem = rq->xdp_rxq.mem; } return 0; err_reg_mem: xdp_rxq_info_unreg(&priv->rq[i].xdp_rxq); err_rxq_reg: for (i--; i >= start; i--) { struct veth_rq *rq = &priv->rq[i]; xdp_rxq_info_unreg(&rq->xdp_rxq); if (!napi_already_on) netif_napi_del(&rq->xdp_napi); } return err; } static void veth_disable_xdp_range(struct net_device *dev, int start, int end, bool delete_napi) { struct veth_priv *priv = netdev_priv(dev); int i; for (i = start; i < end; i++) { struct veth_rq *rq = &priv->rq[i]; rq->xdp_rxq.mem = rq->xdp_mem; xdp_rxq_info_unreg(&rq->xdp_rxq); if (delete_napi) netif_napi_del(&rq->xdp_napi); } } static int veth_enable_xdp(struct net_device *dev) { bool napi_already_on = veth_gro_requested(dev) && (dev->flags & IFF_UP); struct veth_priv *priv = netdev_priv(dev); int err, i; if (!xdp_rxq_info_is_reg(&priv->rq[0].xdp_rxq)) { err = veth_enable_xdp_range(dev, 0, dev->real_num_rx_queues, napi_already_on); if (err) return err; if (!napi_already_on) { err = __veth_napi_enable(dev); if (err) { veth_disable_xdp_range(dev, 0, dev->real_num_rx_queues, true); return err; } if (!veth_gro_requested(dev)) { /* user-space did not require GRO, but adding XDP * is supposed to get GRO working */ dev->features |= NETIF_F_GRO; netdev_features_change(dev); } } } for (i = 0; i < dev->real_num_rx_queues; i++) { rcu_assign_pointer(priv->rq[i].xdp_prog, priv->_xdp_prog); rcu_assign_pointer(priv->rq[i].napi, &priv->rq[i].xdp_napi); } return 0; } static void veth_disable_xdp(struct net_device *dev) { struct veth_priv *priv = netdev_priv(dev); int i; for (i = 0; i < dev->real_num_rx_queues; i++) rcu_assign_pointer(priv->rq[i].xdp_prog, NULL); if (!netif_running(dev) || !veth_gro_requested(dev)) { veth_napi_del(dev); /* if user-space did not require GRO, since adding XDP * enabled it, clear it now */ if (!veth_gro_requested(dev) && netif_running(dev)) { dev->features &= ~NETIF_F_GRO; netdev_features_change(dev); } } veth_disable_xdp_range(dev, 0, dev->real_num_rx_queues, false); } 
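/* NAPI-only setup path: used when GRO is requested on the device but no XDP program is attached, so the rings and page pools are created without registering XDP rxq info. */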
static int veth_napi_enable_range(struct net_device *dev, int start, int end) { struct veth_priv *priv = netdev_priv(dev); int err, i; for (i = start; i < end; i++) { struct veth_rq *rq = &priv->rq[i]; netif_napi_add(dev, &rq->xdp_napi, veth_poll); } err = __veth_napi_enable_range(dev, start, end); if (err) { for (i = start; i < end; i++) { struct veth_rq *rq = &priv->rq[i]; netif_napi_del(&rq->xdp_napi); } return err; } return err; } static int veth_napi_enable(struct net_device *dev) { return veth_napi_enable_range(dev, 0, dev->real_num_rx_queues); } static void veth_disable_range_safe(struct net_device *dev, int start, int end) { struct veth_priv *priv = netdev_priv(dev); if (start >= end) return; if (priv->_xdp_prog) { veth_napi_del_range(dev, start, end); veth_disable_xdp_range(dev, start, end, false); } else if (veth_gro_requested(dev)) { veth_napi_del_range(dev, start, end); } } static int veth_enable_range_safe(struct net_device *dev, int start, int end) { struct veth_priv *priv = netdev_priv(dev); int err; if (start >= end) return 0; if (priv->_xdp_prog) { /* these channels are freshly initialized, napi is not on there even * when GRO is requeste */ err = veth_enable_xdp_range(dev, start, end, false); if (err) return err; err = __veth_napi_enable_range(dev, start, end); if (err) { /* on error always delete the newly added napis */ veth_disable_xdp_range(dev, start, end, true); return err; } } else if (veth_gro_requested(dev)) { return veth_napi_enable_range(dev, start, end); } return 0; } static void veth_set_xdp_features(struct net_device *dev) { struct veth_priv *priv = netdev_priv(dev); struct net_device *peer; peer = rtnl_dereference(priv->peer); if (peer && peer->real_num_tx_queues <= dev->real_num_rx_queues) { struct veth_priv *priv_peer = netdev_priv(peer); xdp_features_t val = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT | NETDEV_XDP_ACT_RX_SG; if (priv_peer->_xdp_prog || veth_gro_requested(peer)) val |= NETDEV_XDP_ACT_NDO_XMIT | NETDEV_XDP_ACT_NDO_XMIT_SG; xdp_set_features_flag(dev, val); } else { xdp_clear_features_flag(dev); } } static int veth_set_channels(struct net_device *dev, struct ethtool_channels *ch) { struct veth_priv *priv = netdev_priv(dev); unsigned int old_rx_count, new_rx_count; struct veth_priv *peer_priv; struct net_device *peer; int err; /* sanity check. Upper bounds are already enforced by the caller */ if (!ch->rx_count || !ch->tx_count) return -EINVAL; /* avoid braking XDP, if that is enabled */ peer = rtnl_dereference(priv->peer); peer_priv = peer ? netdev_priv(peer) : NULL; if (priv->_xdp_prog && peer && ch->rx_count < peer->real_num_tx_queues) return -EINVAL; if (peer && peer_priv && peer_priv->_xdp_prog && ch->tx_count > peer->real_num_rx_queues) return -EINVAL; old_rx_count = dev->real_num_rx_queues; new_rx_count = ch->rx_count; if (netif_running(dev)) { /* turn device off */ netif_carrier_off(dev); if (peer) netif_carrier_off(peer); /* try to allocate new resurces, as needed*/ err = veth_enable_range_safe(dev, old_rx_count, new_rx_count); if (err) goto out; } err = netif_set_real_num_rx_queues(dev, ch->rx_count); if (err) goto revert; err = netif_set_real_num_tx_queues(dev, ch->tx_count); if (err) { int err2 = netif_set_real_num_rx_queues(dev, old_rx_count); /* this error condition could happen only if rx and tx change * in opposite directions (e.g. 
tx nr raises, rx nr decreases) * and we can't do anything to fully restore the original * status */ if (err2) pr_warn("Can't restore rx queues config %d -> %d %d", new_rx_count, old_rx_count, err2); else goto revert; } out: if (netif_running(dev)) { /* note that we need to swap the arguments WRT the enable part * to identify the range we have to disable */ veth_disable_range_safe(dev, new_rx_count, old_rx_count); netif_carrier_on(dev); if (peer) netif_carrier_on(peer); } /* update XDP supported features */ veth_set_xdp_features(dev); if (peer) veth_set_xdp_features(peer); return err; revert: new_rx_count = old_rx_count; old_rx_count = ch->rx_count; goto out; } static int veth_open(struct net_device *dev) { struct veth_priv *priv = netdev_priv(dev); struct net_device *peer = rtnl_dereference(priv->peer); int err; if (!peer) return -ENOTCONN; if (priv->_xdp_prog) { err = veth_enable_xdp(dev); if (err) return err; } else if (veth_gro_requested(dev)) { err = veth_napi_enable(dev); if (err) return err; } if (peer->flags & IFF_UP) { netif_carrier_on(dev); netif_carrier_on(peer); } veth_set_xdp_features(dev); return 0; } static int veth_close(struct net_device *dev) { struct veth_priv *priv = netdev_priv(dev); struct net_device *peer = rtnl_dereference(priv->peer); netif_carrier_off(dev); if (peer) netif_carrier_off(peer); if (priv->_xdp_prog) veth_disable_xdp(dev); else if (veth_gro_requested(dev)) veth_napi_del(dev); return 0; } static int is_valid_veth_mtu(int mtu) { return mtu >= ETH_MIN_MTU && mtu <= ETH_MAX_MTU; } static int veth_alloc_queues(struct net_device *dev) { struct veth_priv *priv = netdev_priv(dev); int i; priv->rq = kcalloc(dev->num_rx_queues, sizeof(*priv->rq), GFP_KERNEL_ACCOUNT); if (!priv->rq) return -ENOMEM; for (i = 0; i < dev->num_rx_queues; i++) { priv->rq[i].dev = dev; u64_stats_init(&priv->rq[i].stats.syncp); } return 0; } static void veth_free_queues(struct net_device *dev) { struct veth_priv *priv = netdev_priv(dev); kfree(priv->rq); } static int veth_dev_init(struct net_device *dev) { int err; dev->lstats = netdev_alloc_pcpu_stats(struct pcpu_lstats); if (!dev->lstats) return -ENOMEM; err = veth_alloc_queues(dev); if (err) { free_percpu(dev->lstats); return err; } return 0; } static void veth_dev_free(struct net_device *dev) { veth_free_queues(dev); free_percpu(dev->lstats); } #ifdef CONFIG_NET_POLL_CONTROLLER static void veth_poll_controller(struct net_device *dev) { /* veth only receives frames when its peer sends one * Since it has nothing to do with disabling irqs, we are guaranteed * never to have pending data when we poll for it so * there is nothing to do here. * * We need this though so netpoll recognizes us as an interface that * supports polling, which enables bridge devices in virt setups to * still use netconsole */ } #endif /* CONFIG_NET_POLL_CONTROLLER */ static int veth_get_iflink(const struct net_device *dev) { struct veth_priv *priv = netdev_priv(dev); struct net_device *peer; int iflink; rcu_read_lock(); peer = rcu_dereference(priv->peer); iflink = peer ? 
peer->ifindex : 0; rcu_read_unlock(); return iflink; } static netdev_features_t veth_fix_features(struct net_device *dev, netdev_features_t features) { struct veth_priv *priv = netdev_priv(dev); struct net_device *peer; peer = rtnl_dereference(priv->peer); if (peer) { struct veth_priv *peer_priv = netdev_priv(peer); if (peer_priv->_xdp_prog) features &= ~NETIF_F_GSO_SOFTWARE; } if (priv->_xdp_prog) features |= NETIF_F_GRO; return features; } static int veth_set_features(struct net_device *dev, netdev_features_t features) { netdev_features_t changed = features ^ dev->features; struct veth_priv *priv = netdev_priv(dev); struct net_device *peer; int err; if (!(changed & NETIF_F_GRO) || !(dev->flags & IFF_UP) || priv->_xdp_prog) return 0; peer = rtnl_dereference(priv->peer); if (features & NETIF_F_GRO) { err = veth_napi_enable(dev); if (err) return err; if (peer) xdp_features_set_redirect_target(peer, true); } else { if (peer) xdp_features_clear_redirect_target(peer); veth_napi_del(dev); } return 0; } static void veth_set_rx_headroom(struct net_device *dev, int new_hr) { struct veth_priv *peer_priv, *priv = netdev_priv(dev); struct net_device *peer; if (new_hr < 0) new_hr = 0; rcu_read_lock(); peer = rcu_dereference(priv->peer); if (unlikely(!peer)) goto out; peer_priv = netdev_priv(peer); priv->requested_headroom = new_hr; new_hr = max(priv->requested_headroom, peer_priv->requested_headroom); dev->needed_headroom = new_hr; peer->needed_headroom = new_hr; out: rcu_read_unlock(); } static int veth_xdp_set(struct net_device *dev, struct bpf_prog *prog, struct netlink_ext_ack *extack) { struct veth_priv *priv = netdev_priv(dev); struct bpf_prog *old_prog; struct net_device *peer; unsigned int max_mtu; int err; old_prog = priv->_xdp_prog; priv->_xdp_prog = prog; peer = rtnl_dereference(priv->peer); if (prog) { if (!peer) { NL_SET_ERR_MSG_MOD(extack, "Cannot set XDP when peer is detached"); err = -ENOTCONN; goto err; } max_mtu = SKB_WITH_OVERHEAD(PAGE_SIZE - VETH_XDP_HEADROOM) - peer->hard_header_len; /* Allow increasing the max_mtu if the program supports * XDP fragments. 
*/ if (prog->aux->xdp_has_frags) max_mtu += PAGE_SIZE * MAX_SKB_FRAGS; if (peer->mtu > max_mtu) { NL_SET_ERR_MSG_MOD(extack, "Peer MTU is too large to set XDP"); err = -ERANGE; goto err; } if (dev->real_num_rx_queues < peer->real_num_tx_queues) { NL_SET_ERR_MSG_MOD(extack, "XDP expects number of rx queues not less than peer tx queues"); err = -ENOSPC; goto err; } if (dev->flags & IFF_UP) { err = veth_enable_xdp(dev); if (err) { NL_SET_ERR_MSG_MOD(extack, "Setup for XDP failed"); goto err; } } if (!old_prog) { peer->hw_features &= ~NETIF_F_GSO_SOFTWARE; peer->max_mtu = max_mtu; } xdp_features_set_redirect_target(peer, true); } if (old_prog) { if (!prog) { if (peer && !veth_gro_requested(dev)) xdp_features_clear_redirect_target(peer); if (dev->flags & IFF_UP) veth_disable_xdp(dev); if (peer) { peer->hw_features |= NETIF_F_GSO_SOFTWARE; peer->max_mtu = ETH_MAX_MTU; } } bpf_prog_put(old_prog); } if ((!!old_prog ^ !!prog) && peer) netdev_update_features(peer); return 0; err: priv->_xdp_prog = old_prog; return err; } static int veth_xdp(struct net_device *dev, struct netdev_bpf *xdp) { switch (xdp->command) { case XDP_SETUP_PROG: return veth_xdp_set(dev, xdp->prog, xdp->extack); default: return -EINVAL; } } static int veth_xdp_rx_timestamp(const struct xdp_md *ctx, u64 *timestamp) { struct veth_xdp_buff *_ctx = (void *)ctx; if (!_ctx->skb) return -ENODATA; *timestamp = skb_hwtstamps(_ctx->skb)->hwtstamp; return 0; } static int veth_xdp_rx_hash(const struct xdp_md *ctx, u32 *hash, enum xdp_rss_hash_type *rss_type) { struct veth_xdp_buff *_ctx = (void *)ctx; struct sk_buff *skb = _ctx->skb; if (!skb) return -ENODATA; *hash = skb_get_hash(skb); *rss_type = skb->l4_hash ? XDP_RSS_TYPE_L4_ANY : XDP_RSS_TYPE_NONE; return 0; } static const struct net_device_ops veth_netdev_ops = { .ndo_init = veth_dev_init, .ndo_open = veth_open, .ndo_stop = veth_close, .ndo_start_xmit = veth_xmit, .ndo_get_stats64 = veth_get_stats64, .ndo_set_rx_mode = veth_set_multicast_list, .ndo_set_mac_address = eth_mac_addr, #ifdef CONFIG_NET_POLL_CONTROLLER .ndo_poll_controller = veth_poll_controller, #endif .ndo_get_iflink = veth_get_iflink, .ndo_fix_features = veth_fix_features, .ndo_set_features = veth_set_features, .ndo_features_check = passthru_features_check, .ndo_set_rx_headroom = veth_set_rx_headroom, .ndo_bpf = veth_xdp, .ndo_xdp_xmit = veth_ndo_xdp_xmit, .ndo_get_peer_dev = veth_peer_dev, }; static const struct xdp_metadata_ops veth_xdp_metadata_ops = { .xmo_rx_timestamp = veth_xdp_rx_timestamp, .xmo_rx_hash = veth_xdp_rx_hash, }; #define VETH_FEATURES (NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_HW_CSUM | \ NETIF_F_RXCSUM | NETIF_F_SCTP_CRC | NETIF_F_HIGHDMA | \ NETIF_F_GSO_SOFTWARE | NETIF_F_GSO_ENCAP_ALL | \ NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX | \ NETIF_F_HW_VLAN_STAG_TX | NETIF_F_HW_VLAN_STAG_RX ) static void veth_setup(struct net_device *dev) { ether_setup(dev); dev->priv_flags &= ~IFF_TX_SKB_SHARING; dev->priv_flags |= IFF_LIVE_ADDR_CHANGE; dev->priv_flags |= IFF_NO_QUEUE; dev->priv_flags |= IFF_PHONY_HEADROOM; dev->netdev_ops = &veth_netdev_ops; dev->xdp_metadata_ops = &veth_xdp_metadata_ops; dev->ethtool_ops = &veth_ethtool_ops; dev->features |= NETIF_F_LLTX; dev->features |= VETH_FEATURES; dev->vlan_features = dev->features & ~(NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_STAG_TX | NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_STAG_RX); dev->needs_free_netdev = true; dev->priv_destructor = veth_dev_free; dev->max_mtu = ETH_MAX_MTU; dev->hw_features = VETH_FEATURES; dev->hw_enc_features = VETH_FEATURES; 
dev->mpls_features = NETIF_F_HW_CSUM | NETIF_F_GSO_SOFTWARE; netif_set_tso_max_size(dev, GSO_MAX_SIZE); } /* * netlink interface */ static int veth_validate(struct nlattr *tb[], struct nlattr *data[], struct netlink_ext_ack *extack) { if (tb[IFLA_ADDRESS]) { if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN) return -EINVAL; if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS]))) return -EADDRNOTAVAIL; } if (tb[IFLA_MTU]) { if (!is_valid_veth_mtu(nla_get_u32(tb[IFLA_MTU]))) return -EINVAL; } return 0; } static struct rtnl_link_ops veth_link_ops; static void veth_disable_gro(struct net_device *dev) { dev->features &= ~NETIF_F_GRO; dev->wanted_features &= ~NETIF_F_GRO; netdev_update_features(dev); } static int veth_init_queues(struct net_device *dev, struct nlattr *tb[]) { int err; if (!tb[IFLA_NUM_TX_QUEUES] && dev->num_tx_queues > 1) { err = netif_set_real_num_tx_queues(dev, 1); if (err) return err; } if (!tb[IFLA_NUM_RX_QUEUES] && dev->num_rx_queues > 1) { err = netif_set_real_num_rx_queues(dev, 1); if (err) return err; } return 0; } static int veth_newlink(struct net *src_net, struct net_device *dev, struct nlattr *tb[], struct nlattr *data[], struct netlink_ext_ack *extack) { int err; struct net_device *peer; struct veth_priv *priv; char ifname[IFNAMSIZ]; struct nlattr *peer_tb[IFLA_MAX + 1], **tbp; unsigned char name_assign_type; struct ifinfomsg *ifmp; struct net *net; /* * create and register peer first */ if (data != NULL && data[VETH_INFO_PEER] != NULL) { struct nlattr *nla_peer; nla_peer = data[VETH_INFO_PEER]; ifmp = nla_data(nla_peer); err = rtnl_nla_parse_ifinfomsg(peer_tb, nla_peer, extack); if (err < 0) return err; err = veth_validate(peer_tb, NULL, extack); if (err < 0) return err; tbp = peer_tb; } else { ifmp = NULL; tbp = tb; } if (ifmp && tbp[IFLA_IFNAME]) { nla_strscpy(ifname, tbp[IFLA_IFNAME], IFNAMSIZ); name_assign_type = NET_NAME_USER; } else { snprintf(ifname, IFNAMSIZ, DRV_NAME "%%d"); name_assign_type = NET_NAME_ENUM; } net = rtnl_link_get_net(src_net, tbp); if (IS_ERR(net)) return PTR_ERR(net); peer = rtnl_create_link(net, ifname, name_assign_type, &veth_link_ops, tbp, extack); if (IS_ERR(peer)) { put_net(net); return PTR_ERR(peer); } if (!ifmp || !tbp[IFLA_ADDRESS]) eth_hw_addr_random(peer); if (ifmp && (dev->ifindex != 0)) peer->ifindex = ifmp->ifi_index; netif_inherit_tso_max(peer, dev); err = register_netdevice(peer); put_net(net); net = NULL; if (err < 0) goto err_register_peer; /* keep GRO disabled by default to be consistent with the established * veth behavior */ veth_disable_gro(peer); netif_carrier_off(peer); err = rtnl_configure_link(peer, ifmp, 0, NULL); if (err < 0) goto err_configure_peer; /* * register dev last * * note, that since we've registered new device the dev's name * should be re-allocated */ if (tb[IFLA_ADDRESS] == NULL) eth_hw_addr_random(dev); if (tb[IFLA_IFNAME]) nla_strscpy(dev->name, tb[IFLA_IFNAME], IFNAMSIZ); else snprintf(dev->name, IFNAMSIZ, DRV_NAME "%%d"); err = register_netdevice(dev); if (err < 0) goto err_register_dev; netif_carrier_off(dev); /* * tie the deviced together */ priv = netdev_priv(dev); rcu_assign_pointer(priv->peer, peer); err = veth_init_queues(dev, tb); if (err) goto err_queues; priv = netdev_priv(peer); rcu_assign_pointer(priv->peer, dev); err = veth_init_queues(peer, tb); if (err) goto err_queues; veth_disable_gro(dev); /* update XDP supported features */ veth_set_xdp_features(dev); veth_set_xdp_features(peer); return 0; err_queues: unregister_netdevice(dev); err_register_dev: /* nothing to do */ err_configure_peer: 
	unregister_netdevice(peer);
	return err;

err_register_peer:
	free_netdev(peer);
	return err;
}

static void veth_dellink(struct net_device *dev, struct list_head *head)
{
	struct veth_priv *priv;
	struct net_device *peer;

	priv = netdev_priv(dev);
	peer = rtnl_dereference(priv->peer);

	/* Note : dellink() is called from default_device_exit_batch(),
	 * before a rcu_synchronize() point. The devices are guaranteed
	 * not being freed before one RCU grace period.
	 */
	RCU_INIT_POINTER(priv->peer, NULL);
	unregister_netdevice_queue(dev, head);

	if (peer) {
		priv = netdev_priv(peer);
		RCU_INIT_POINTER(priv->peer, NULL);
		unregister_netdevice_queue(peer, head);
	}
}

static const struct nla_policy veth_policy[VETH_INFO_MAX + 1] = {
	[VETH_INFO_PEER] = { .len = sizeof(struct ifinfomsg) },
};

static struct net *veth_get_link_net(const struct net_device *dev)
{
	struct veth_priv *priv = netdev_priv(dev);
	struct net_device *peer = rtnl_dereference(priv->peer);

	return peer ? dev_net(peer) : dev_net(dev);
}

static unsigned int veth_get_num_queues(void)
{
	/* enforce the same queue limit as rtnl_create_link */
	int queues = num_possible_cpus();

	if (queues > 4096)
		queues = 4096;
	return queues;
}

static struct rtnl_link_ops veth_link_ops = {
	.kind = DRV_NAME,
	.priv_size = sizeof(struct veth_priv),
	.setup = veth_setup,
	.validate = veth_validate,
	.newlink = veth_newlink,
	.dellink = veth_dellink,
	.policy = veth_policy,
	.maxtype = VETH_INFO_MAX,
	.get_link_net = veth_get_link_net,
	.get_num_tx_queues = veth_get_num_queues,
	.get_num_rx_queues = veth_get_num_queues,
};

/*
 * init/fini
 */

static __init int veth_init(void)
{
	return rtnl_link_register(&veth_link_ops);
}

static __exit void veth_exit(void)
{
	rtnl_link_unregister(&veth_link_ops);
}

module_init(veth_init);
module_exit(veth_exit);

MODULE_DESCRIPTION("Virtual Ethernet Tunnel");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS_RTNL_LINK(DRV_NAME);
linux-master
drivers/net/veth.c
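The channel-resize path in veth_set_channels() above follows an enable-then-commit-then-trim order: resources for any newly added rx queues are prepared before the real queue count changes, and once the new count is committed the leftover range is torn down with the start/end counts swapped. The standalone sketch below is purely illustrative (the helpers and printf-based "queues" are made up, not kernel API); it only mimics that ordering under the assumption that enabling always succeeds.

/*
 * Illustrative userspace sketch of the grow-then-commit-then-trim pattern
 * used by veth_set_channels(). Not kernel code; all helpers are stubs.
 */
#include <stdio.h>

static int enable_range(int start, int end)
{
	for (int i = start; i < end; i++)	/* no-op when start >= end */
		printf("enable rx queue %d\n", i);
	return 0;				/* assume allocation succeeds */
}

static void disable_range(int start, int end)
{
	for (int i = start; i < end; i++)	/* no-op when start >= end */
		printf("disable rx queue %d\n", i);
}

static int set_rx_count(int *real_count, int old_count, int new_count)
{
	int err;

	/* grow first, so the new queues exist before they become visible */
	err = enable_range(old_count, new_count);
	if (err)
		return err;

	*real_count = new_count;

	/* shrink afterwards: note the swapped counts, as in the driver */
	disable_range(new_count, old_count);
	return 0;
}

int main(void)
{
	int real = 2;

	set_rx_count(&real, 2, 4);	/* grows: enables queues 2 and 3 */
	set_rx_count(&real, 4, 1);	/* shrinks: disables queues 1..3 */
	printf("real_num_rx_queues = %d\n", real);
	return 0;
}

Because each range helper is a no-op when start >= end, the same two calls cover both growing and shrinking, which is why the driver can reuse veth_enable_range_safe()/veth_disable_range_safe() for either direction.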
/* * Equalizer Load-balancer for serial network interfaces. * * (c) Copyright 1995 Simon "Guru Aleph-Null" Janes * NCM: Network and Communications Management, Inc. * * (c) Copyright 2002 David S. Miller ([email protected]) * * This software may be used and distributed according to the terms * of the GNU General Public License, incorporated herein by reference. * * The author may be reached as [email protected], or C/O * NCM * Attn: Simon Janes * 6803 Whittier Ave * McLean VA 22101 * Phone: 1-703-847-0040 ext 103 */ /* * Sources: * skeleton.c by Donald Becker. * Inspirations: * The Harried and Overworked Alan Cox * Conspiracies: * The Alan Cox and Mike McLagan plot to get someone else to do the code, * which turned out to be me. */ /* * $Log: eql.c,v $ * Revision 1.2 1996/04/11 17:51:52 guru * Added one-line eql_remove_slave patch. * * Revision 1.1 1996/04/11 17:44:17 guru * Initial revision * * Revision 3.13 1996/01/21 15:17:18 alan * tx_queue_len changes. * reformatted. * * Revision 3.12 1995/03/22 21:07:51 anarchy * Added capable() checks on configuration. * Moved header file. * * Revision 3.11 1995/01/19 23:14:31 guru * slave_load = (ULONG_MAX - (ULONG_MAX / 2)) - * (priority_Bps) + bytes_queued * 8; * * Revision 3.10 1995/01/19 23:07:53 guru * back to * slave_load = (ULONG_MAX - (ULONG_MAX / 2)) - * (priority_Bps) + bytes_queued; * * Revision 3.9 1995/01/19 22:38:20 guru * slave_load = (ULONG_MAX - (ULONG_MAX / 2)) - * (priority_Bps) + bytes_queued * 4; * * Revision 3.8 1995/01/19 22:30:55 guru * slave_load = (ULONG_MAX - (ULONG_MAX / 2)) - * (priority_Bps) + bytes_queued * 2; * * Revision 3.7 1995/01/19 21:52:35 guru * printk's trimmed out. * * Revision 3.6 1995/01/19 21:49:56 guru * This is working pretty well. I gained 1 K/s in speed.. now it's just * robustness and printk's to be diked out. * * Revision 3.5 1995/01/18 22:29:59 guru * still crashes the kernel when the lock_wait thing is woken up. * * Revision 3.4 1995/01/18 21:59:47 guru * Broken set-bit locking snapshot * * Revision 3.3 1995/01/17 22:09:18 guru * infinite sleep in a lock somewhere.. * * Revision 3.2 1995/01/15 16:46:06 guru * Log trimmed of non-pertinent 1.x branch messages * * Revision 3.1 1995/01/15 14:41:45 guru * New Scheduler and timer stuff... * * Revision 1.15 1995/01/15 14:29:02 guru * Will make 1.14 (now 1.15) the 3.0 branch, and the 1.12 the 2.0 branch, the one * with the dumber scheduler * * Revision 1.14 1995/01/15 02:37:08 guru * shock.. the kept-new-versions could have zonked working * stuff.. shudder * * Revision 1.13 1995/01/15 02:36:31 guru * big changes * * scheduler was torn out and replaced with something smarter * * global names not prefixed with eql_ were renamed to protect * against namespace collisions * * a few more abstract interfaces were added to facilitate any * potential change of datastructure. the driver is still using * a linked list of slaves. going to a heap would be a bit of * an overkill. * * this compiles fine with no warnings. 
* * the locking mechanism and timer stuff must be written however, * this version will not work otherwise * * Sorry, I had to rewrite most of this for 2.5.x -DaveM */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/compat.h> #include <linux/capability.h> #include <linux/module.h> #include <linux/kernel.h> #include <linux/init.h> #include <linux/slab.h> #include <linux/timer.h> #include <linux/netdevice.h> #include <net/net_namespace.h> #include <linux/if.h> #include <linux/if_arp.h> #include <linux/if_eql.h> #include <linux/pkt_sched.h> #include <linux/uaccess.h> static int eql_open(struct net_device *dev); static int eql_close(struct net_device *dev); static int eql_siocdevprivate(struct net_device *dev, struct ifreq *ifr, void __user *data, int cmd); static netdev_tx_t eql_slave_xmit(struct sk_buff *skb, struct net_device *dev); #define eql_is_slave(dev) ((dev->flags & IFF_SLAVE) == IFF_SLAVE) #define eql_is_master(dev) ((dev->flags & IFF_MASTER) == IFF_MASTER) static void eql_kill_one_slave(slave_queue_t *queue, slave_t *slave); static void eql_timer(struct timer_list *t) { equalizer_t *eql = from_timer(eql, t, timer); struct list_head *this, *tmp, *head; spin_lock(&eql->queue.lock); head = &eql->queue.all_slaves; list_for_each_safe(this, tmp, head) { slave_t *slave = list_entry(this, slave_t, list); if ((slave->dev->flags & IFF_UP) == IFF_UP) { slave->bytes_queued -= slave->priority_Bps; if (slave->bytes_queued < 0) slave->bytes_queued = 0; } else { eql_kill_one_slave(&eql->queue, slave); } } spin_unlock(&eql->queue.lock); eql->timer.expires = jiffies + EQL_DEFAULT_RESCHED_IVAL; add_timer(&eql->timer); } static const char version[] __initconst = "Equalizer2002: Simon Janes ([email protected]) and David S. Miller ([email protected])"; static const struct net_device_ops eql_netdev_ops = { .ndo_open = eql_open, .ndo_stop = eql_close, .ndo_siocdevprivate = eql_siocdevprivate, .ndo_start_xmit = eql_slave_xmit, }; static void __init eql_setup(struct net_device *dev) { equalizer_t *eql = netdev_priv(dev); timer_setup(&eql->timer, eql_timer, 0); eql->timer.expires = jiffies + EQL_DEFAULT_RESCHED_IVAL; spin_lock_init(&eql->queue.lock); INIT_LIST_HEAD(&eql->queue.all_slaves); eql->queue.master_dev = dev; dev->netdev_ops = &eql_netdev_ops; /* * Now we undo some of the things that eth_setup does * that we don't like */ dev->mtu = EQL_DEFAULT_MTU; /* set to 576 in if_eql.h */ dev->flags = IFF_MASTER; dev->type = ARPHRD_SLIP; dev->tx_queue_len = 5; /* Hands them off fast */ netif_keep_dst(dev); } static int eql_open(struct net_device *dev) { equalizer_t *eql = netdev_priv(dev); /* XXX We should force this off automatically for the user. */ netdev_info(dev, "remember to turn off Van-Jacobson compression on your slave devices\n"); BUG_ON(!list_empty(&eql->queue.all_slaves)); eql->min_slaves = 1; eql->max_slaves = EQL_DEFAULT_MAX_SLAVES; /* 4 usually... 
*/ add_timer(&eql->timer); return 0; } static void eql_kill_one_slave(slave_queue_t *queue, slave_t *slave) { list_del(&slave->list); queue->num_slaves--; slave->dev->flags &= ~IFF_SLAVE; netdev_put(slave->dev, &slave->dev_tracker); kfree(slave); } static void eql_kill_slave_queue(slave_queue_t *queue) { struct list_head *head, *tmp, *this; spin_lock_bh(&queue->lock); head = &queue->all_slaves; list_for_each_safe(this, tmp, head) { slave_t *s = list_entry(this, slave_t, list); eql_kill_one_slave(queue, s); } spin_unlock_bh(&queue->lock); } static int eql_close(struct net_device *dev) { equalizer_t *eql = netdev_priv(dev); /* * The timer has to be stopped first before we start hacking away * at the data structure it scans every so often... */ del_timer_sync(&eql->timer); eql_kill_slave_queue(&eql->queue); return 0; } static int eql_enslave(struct net_device *dev, slaving_request_t __user *srq); static int eql_emancipate(struct net_device *dev, slaving_request_t __user *srq); static int eql_g_slave_cfg(struct net_device *dev, slave_config_t __user *sc); static int eql_s_slave_cfg(struct net_device *dev, slave_config_t __user *sc); static int eql_g_master_cfg(struct net_device *dev, master_config_t __user *mc); static int eql_s_master_cfg(struct net_device *dev, master_config_t __user *mc); static int eql_siocdevprivate(struct net_device *dev, struct ifreq *ifr, void __user *data, int cmd) { if (cmd != EQL_GETMASTRCFG && cmd != EQL_GETSLAVECFG && !capable(CAP_NET_ADMIN)) return -EPERM; if (in_compat_syscall()) /* to be implemented */ return -EOPNOTSUPP; switch (cmd) { case EQL_ENSLAVE: return eql_enslave(dev, data); case EQL_EMANCIPATE: return eql_emancipate(dev, data); case EQL_GETSLAVECFG: return eql_g_slave_cfg(dev, data); case EQL_SETSLAVECFG: return eql_s_slave_cfg(dev, data); case EQL_GETMASTRCFG: return eql_g_master_cfg(dev, data); case EQL_SETMASTRCFG: return eql_s_master_cfg(dev, data); default: return -EOPNOTSUPP; } } /* queue->lock must be held */ static slave_t *__eql_schedule_slaves(slave_queue_t *queue) { unsigned long best_load = ~0UL; struct list_head *this, *tmp, *head; slave_t *best_slave; best_slave = NULL; /* Make a pass to set the best slave. */ head = &queue->all_slaves; list_for_each_safe(this, tmp, head) { slave_t *slave = list_entry(this, slave_t, list); unsigned long slave_load, bytes_queued, priority_Bps; /* Go through the slave list once, updating best_slave * whenever a new best_load is found. */ bytes_queued = slave->bytes_queued; priority_Bps = slave->priority_Bps; if ((slave->dev->flags & IFF_UP) == IFF_UP) { slave_load = (~0UL - (~0UL / 2)) - (priority_Bps) + bytes_queued * 8; if (slave_load < best_load) { best_load = slave_load; best_slave = slave; } } else { /* We found a dead slave, kill it. 
*/ eql_kill_one_slave(queue, slave); } } return best_slave; } static netdev_tx_t eql_slave_xmit(struct sk_buff *skb, struct net_device *dev) { equalizer_t *eql = netdev_priv(dev); slave_t *slave; spin_lock(&eql->queue.lock); slave = __eql_schedule_slaves(&eql->queue); if (slave) { struct net_device *slave_dev = slave->dev; skb->dev = slave_dev; skb->priority = TC_PRIO_FILLER; slave->bytes_queued += skb->len; dev_queue_xmit(skb); dev->stats.tx_packets++; } else { dev->stats.tx_dropped++; dev_kfree_skb(skb); } spin_unlock(&eql->queue.lock); return NETDEV_TX_OK; } /* * Private ioctl functions */ /* queue->lock must be held */ static slave_t *__eql_find_slave_dev(slave_queue_t *queue, struct net_device *dev) { struct list_head *this, *head; head = &queue->all_slaves; list_for_each(this, head) { slave_t *slave = list_entry(this, slave_t, list); if (slave->dev == dev) return slave; } return NULL; } static inline int eql_is_full(slave_queue_t *queue) { equalizer_t *eql = netdev_priv(queue->master_dev); if (queue->num_slaves >= eql->max_slaves) return 1; return 0; } /* queue->lock must be held */ static int __eql_insert_slave(slave_queue_t *queue, slave_t *slave) { if (!eql_is_full(queue)) { slave_t *duplicate_slave = NULL; duplicate_slave = __eql_find_slave_dev(queue, slave->dev); if (duplicate_slave) eql_kill_one_slave(queue, duplicate_slave); netdev_hold(slave->dev, &slave->dev_tracker, GFP_ATOMIC); list_add(&slave->list, &queue->all_slaves); queue->num_slaves++; slave->dev->flags |= IFF_SLAVE; return 0; } return -ENOSPC; } static int eql_enslave(struct net_device *master_dev, slaving_request_t __user *srqp) { struct net_device *slave_dev; slaving_request_t srq; if (copy_from_user(&srq, srqp, sizeof (slaving_request_t))) return -EFAULT; slave_dev = __dev_get_by_name(&init_net, srq.slave_name); if (!slave_dev) return -ENODEV; if ((master_dev->flags & IFF_UP) == IFF_UP) { /* slave is not a master & not already a slave: */ if (!eql_is_master(slave_dev) && !eql_is_slave(slave_dev)) { slave_t *s = kzalloc(sizeof(*s), GFP_KERNEL); equalizer_t *eql = netdev_priv(master_dev); int ret; if (!s) return -ENOMEM; s->dev = slave_dev; s->priority = srq.priority; s->priority_bps = srq.priority; s->priority_Bps = srq.priority / 8; spin_lock_bh(&eql->queue.lock); ret = __eql_insert_slave(&eql->queue, s); if (ret) kfree(s); spin_unlock_bh(&eql->queue.lock); return ret; } } return -EINVAL; } static int eql_emancipate(struct net_device *master_dev, slaving_request_t __user *srqp) { equalizer_t *eql = netdev_priv(master_dev); struct net_device *slave_dev; slaving_request_t srq; int ret; if (copy_from_user(&srq, srqp, sizeof (slaving_request_t))) return -EFAULT; slave_dev = __dev_get_by_name(&init_net, srq.slave_name); if (!slave_dev) return -ENODEV; ret = -EINVAL; spin_lock_bh(&eql->queue.lock); if (eql_is_slave(slave_dev)) { slave_t *slave = __eql_find_slave_dev(&eql->queue, slave_dev); if (slave) { eql_kill_one_slave(&eql->queue, slave); ret = 0; } } spin_unlock_bh(&eql->queue.lock); return ret; } static int eql_g_slave_cfg(struct net_device *dev, slave_config_t __user *scp) { equalizer_t *eql = netdev_priv(dev); slave_t *slave; struct net_device *slave_dev; slave_config_t sc; int ret; if (copy_from_user(&sc, scp, sizeof (slave_config_t))) return -EFAULT; slave_dev = __dev_get_by_name(&init_net, sc.slave_name); if (!slave_dev) return -ENODEV; ret = -EINVAL; spin_lock_bh(&eql->queue.lock); if (eql_is_slave(slave_dev)) { slave = __eql_find_slave_dev(&eql->queue, slave_dev); if (slave) { sc.priority = 
slave->priority; ret = 0; } } spin_unlock_bh(&eql->queue.lock); if (!ret && copy_to_user(scp, &sc, sizeof (slave_config_t))) ret = -EFAULT; return ret; } static int eql_s_slave_cfg(struct net_device *dev, slave_config_t __user *scp) { slave_t *slave; equalizer_t *eql; struct net_device *slave_dev; slave_config_t sc; int ret; if (copy_from_user(&sc, scp, sizeof (slave_config_t))) return -EFAULT; slave_dev = __dev_get_by_name(&init_net, sc.slave_name); if (!slave_dev) return -ENODEV; ret = -EINVAL; eql = netdev_priv(dev); spin_lock_bh(&eql->queue.lock); if (eql_is_slave(slave_dev)) { slave = __eql_find_slave_dev(&eql->queue, slave_dev); if (slave) { slave->priority = sc.priority; slave->priority_bps = sc.priority; slave->priority_Bps = sc.priority / 8; ret = 0; } } spin_unlock_bh(&eql->queue.lock); return ret; } static int eql_g_master_cfg(struct net_device *dev, master_config_t __user *mcp) { equalizer_t *eql; master_config_t mc; memset(&mc, 0, sizeof(master_config_t)); if (eql_is_master(dev)) { eql = netdev_priv(dev); mc.max_slaves = eql->max_slaves; mc.min_slaves = eql->min_slaves; if (copy_to_user(mcp, &mc, sizeof (master_config_t))) return -EFAULT; return 0; } return -EINVAL; } static int eql_s_master_cfg(struct net_device *dev, master_config_t __user *mcp) { equalizer_t *eql; master_config_t mc; if (copy_from_user(&mc, mcp, sizeof (master_config_t))) return -EFAULT; if (eql_is_master(dev)) { eql = netdev_priv(dev); eql->max_slaves = mc.max_slaves; eql->min_slaves = mc.min_slaves; return 0; } return -EINVAL; } static struct net_device *dev_eql; static int __init eql_init_module(void) { int err; pr_info("%s\n", version); dev_eql = alloc_netdev(sizeof(equalizer_t), "eql", NET_NAME_UNKNOWN, eql_setup); if (!dev_eql) return -ENOMEM; err = register_netdev(dev_eql); if (err) free_netdev(dev_eql); return err; } static void __exit eql_cleanup_module(void) { unregister_netdev(dev_eql); free_netdev(dev_eql); } module_init(eql_init_module); module_exit(eql_cleanup_module); MODULE_LICENSE("GPL");
linux-master
drivers/net/eql.c
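__eql_schedule_slaves() in the file above ranks every live slave by a single "load" figure, (~0UL - ~0UL / 2) - priority_Bps + bytes_queued * 8, and transmits on the slave with the smallest value, so higher configured bandwidth and a shorter backlog both make a slave more attractive. A minimal userspace sketch of just that heuristic, with made-up example slaves (not kernel code):

/* Standalone illustration of the eql slave-selection heuristic. */
#include <stdio.h>

struct fake_slave {
	const char *name;
	unsigned long priority_Bps;	/* configured bandwidth, bytes/s */
	unsigned long bytes_queued;	/* current backlog in bytes */
};

static const struct fake_slave *pick_slave(const struct fake_slave *s, int n)
{
	unsigned long best_load = ~0UL;
	const struct fake_slave *best = NULL;

	for (int i = 0; i < n; i++) {
		unsigned long load = (~0UL - (~0UL / 2)) -
				     s[i].priority_Bps + s[i].bytes_queued * 8;

		if (load < best_load) {
			best_load = load;
			best = &s[i];
		}
	}
	return best;
}

int main(void)
{
	struct fake_slave slaves[] = {
		{ "ppp0", 28800 / 8, 1200 },	/* hypothetical 28.8k link */
		{ "ppp1", 57600 / 8, 9000 },	/* hypothetical 57.6k link */
	};

	printf("next frame goes to %s\n", pick_slave(slaves, 2)->name);
	return 0;
}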
// SPDX-License-Identifier: GPL-2.0-only
#include <linux/ethtool.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/if_arp.h>
#include <net/rtnetlink.h>
#include <net/sock.h>
#include <net/af_vsock.h>
#include <uapi/linux/vsockmon.h>
#include <linux/virtio_vsock.h>

/* Virtio transport max packet size plus header */
#define DEFAULT_MTU (VIRTIO_VSOCK_MAX_PKT_BUF_SIZE + \
		     sizeof(struct af_vsockmon_hdr))

static int vsockmon_dev_init(struct net_device *dev)
{
	dev->lstats = netdev_alloc_pcpu_stats(struct pcpu_lstats);
	if (!dev->lstats)
		return -ENOMEM;
	return 0;
}

static void vsockmon_dev_uninit(struct net_device *dev)
{
	free_percpu(dev->lstats);
}

struct vsockmon {
	struct vsock_tap vt;
};

static int vsockmon_open(struct net_device *dev)
{
	struct vsockmon *vsockmon = netdev_priv(dev);

	vsockmon->vt.dev = dev;
	vsockmon->vt.module = THIS_MODULE;
	return vsock_add_tap(&vsockmon->vt);
}

static int vsockmon_close(struct net_device *dev)
{
	struct vsockmon *vsockmon = netdev_priv(dev);

	return vsock_remove_tap(&vsockmon->vt);
}

static netdev_tx_t vsockmon_xmit(struct sk_buff *skb, struct net_device *dev)
{
	dev_lstats_add(dev, skb->len);
	dev_kfree_skb(skb);
	return NETDEV_TX_OK;
}

static void vsockmon_get_stats64(struct net_device *dev,
				 struct rtnl_link_stats64 *stats)
{
	dev_lstats_read(dev, &stats->rx_packets, &stats->rx_bytes);

	stats->tx_packets = 0;
	stats->tx_bytes = 0;
}

static int vsockmon_is_valid_mtu(int new_mtu)
{
	return new_mtu >= (int)sizeof(struct af_vsockmon_hdr);
}

static int vsockmon_change_mtu(struct net_device *dev, int new_mtu)
{
	if (!vsockmon_is_valid_mtu(new_mtu))
		return -EINVAL;

	dev->mtu = new_mtu;
	return 0;
}

static const struct net_device_ops vsockmon_ops = {
	.ndo_init = vsockmon_dev_init,
	.ndo_uninit = vsockmon_dev_uninit,
	.ndo_open = vsockmon_open,
	.ndo_stop = vsockmon_close,
	.ndo_start_xmit = vsockmon_xmit,
	.ndo_get_stats64 = vsockmon_get_stats64,
	.ndo_change_mtu = vsockmon_change_mtu,
};

static u32 always_on(struct net_device *dev)
{
	return 1;
}

static const struct ethtool_ops vsockmon_ethtool_ops = {
	.get_link = always_on,
};

static void vsockmon_setup(struct net_device *dev)
{
	dev->type = ARPHRD_VSOCKMON;
	dev->priv_flags |= IFF_NO_QUEUE;

	dev->netdev_ops = &vsockmon_ops;
	dev->ethtool_ops = &vsockmon_ethtool_ops;
	dev->needs_free_netdev = true;

	dev->features = NETIF_F_SG | NETIF_F_FRAGLIST |
			NETIF_F_HIGHDMA | NETIF_F_LLTX;

	dev->flags = IFF_NOARP;

	dev->mtu = DEFAULT_MTU;
}

static struct rtnl_link_ops vsockmon_link_ops __read_mostly = {
	.kind = "vsockmon",
	.priv_size = sizeof(struct vsockmon),
	.setup = vsockmon_setup,
};

static __init int vsockmon_register(void)
{
	return rtnl_link_register(&vsockmon_link_ops);
}

static __exit void vsockmon_unregister(void)
{
	rtnl_link_unregister(&vsockmon_link_ops);
}

module_init(vsockmon_register);
module_exit(vsockmon_unregister);

MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Gerard Garcia <[email protected]>");
MODULE_DESCRIPTION("Vsock monitoring device. Based on nlmon device.");
MODULE_ALIAS_RTNL_LINK("vsockmon");
linux-master
drivers/net/vsockmon.c
// SPDX-License-Identifier: GPL-2.0-or-later /* sb1000.c: A General Instruments SB1000 driver for linux. */ /* Written 1998 by Franco Venturi. Copyright 1998 by Franco Venturi. Copyright 1994,1995 by Donald Becker. Copyright 1993 United States Government as represented by the Director, National Security Agency. This driver is for the General Instruments SB1000 (internal SURFboard) The author may be reached as [email protected] Changes: 981115 Steven Hirsch <[email protected]> Linus changed the timer interface. Should work on all recent development kernels. 980608 Steven Hirsch <[email protected]> Small changes to make it work with 2.1.x kernels. Hopefully, nothing major will change before official release of Linux 2.2. Merged with 2.2 - Alan Cox */ static char version[] = "sb1000.c:v1.1.2 6/01/98 ([email protected])\n"; #include <linux/module.h> #include <linux/kernel.h> #include <linux/sched.h> #include <linux/string.h> #include <linux/interrupt.h> #include <linux/errno.h> #include <linux/if_cablemodem.h> /* for SIOGCM/SIOSCM stuff */ #include <linux/in.h> #include <linux/ioport.h> #include <linux/netdevice.h> #include <linux/if_arp.h> #include <linux/skbuff.h> #include <linux/delay.h> /* for udelay() */ #include <linux/etherdevice.h> #include <linux/pnp.h> #include <linux/init.h> #include <linux/bitops.h> #include <linux/gfp.h> #include <asm/io.h> #include <asm/processor.h> #include <linux/uaccess.h> #ifdef SB1000_DEBUG static int sb1000_debug = SB1000_DEBUG; #else static const int sb1000_debug = 1; #endif static const int SB1000_IO_EXTENT = 8; /* SB1000 Maximum Receive Unit */ static const int SB1000_MRU = 1500; /* octects */ #define NPIDS 4 struct sb1000_private { struct sk_buff *rx_skb[NPIDS]; short rx_dlen[NPIDS]; unsigned int rx_frames; short rx_error_count; short rx_error_dpc_count; unsigned char rx_session_id[NPIDS]; unsigned char rx_frame_id[NPIDS]; unsigned char rx_pkt_type[NPIDS]; }; /* prototypes for Linux interface */ extern int sb1000_probe(struct net_device *dev); static int sb1000_open(struct net_device *dev); static int sb1000_siocdevprivate(struct net_device *dev, struct ifreq *ifr, void __user *data, int cmd); static netdev_tx_t sb1000_start_xmit(struct sk_buff *skb, struct net_device *dev); static irqreturn_t sb1000_interrupt(int irq, void *dev_id); static int sb1000_close(struct net_device *dev); /* SB1000 hardware routines to be used during open/configuration phases */ static int card_wait_for_busy_clear(const int ioaddr[], const char* name); static int card_wait_for_ready(const int ioaddr[], const char* name, unsigned char in[]); static int card_send_command(const int ioaddr[], const char* name, const unsigned char out[], unsigned char in[]); /* SB1000 hardware routines to be used during frame rx interrupt */ static int sb1000_wait_for_ready(const int ioaddr[], const char* name); static int sb1000_wait_for_ready_clear(const int ioaddr[], const char* name); static void sb1000_send_command(const int ioaddr[], const char* name, const unsigned char out[]); static void sb1000_read_status(const int ioaddr[], unsigned char in[]); static void sb1000_issue_read_command(const int ioaddr[], const char* name); /* SB1000 commands for open/configuration */ static int sb1000_reset(const int ioaddr[], const char* name); static int sb1000_check_CRC(const int ioaddr[], const char* name); static inline int sb1000_start_get_set_command(const int ioaddr[], const char* name); static int sb1000_end_get_set_command(const int ioaddr[], const char* name); static int sb1000_activate(const int 
ioaddr[], const char* name); static int sb1000_get_firmware_version(const int ioaddr[], const char* name, unsigned char version[], int do_end); static int sb1000_get_frequency(const int ioaddr[], const char* name, int* frequency); static int sb1000_set_frequency(const int ioaddr[], const char* name, int frequency); static int sb1000_get_PIDs(const int ioaddr[], const char* name, short PID[]); static int sb1000_set_PIDs(const int ioaddr[], const char* name, const short PID[]); /* SB1000 commands for frame rx interrupt */ static int sb1000_rx(struct net_device *dev); static void sb1000_error_dpc(struct net_device *dev); static const struct pnp_device_id sb1000_pnp_ids[] = { { "GIC1000", 0 }, { "", 0 } }; MODULE_DEVICE_TABLE(pnp, sb1000_pnp_ids); static const struct net_device_ops sb1000_netdev_ops = { .ndo_open = sb1000_open, .ndo_start_xmit = sb1000_start_xmit, .ndo_siocdevprivate = sb1000_siocdevprivate, .ndo_stop = sb1000_close, .ndo_set_mac_address = eth_mac_addr, .ndo_validate_addr = eth_validate_addr, }; static int sb1000_probe_one(struct pnp_dev *pdev, const struct pnp_device_id *id) { struct net_device *dev; unsigned short ioaddr[2], irq; unsigned int serial_number; int error = -ENODEV; u8 addr[ETH_ALEN]; if (pnp_device_attach(pdev) < 0) return -ENODEV; if (pnp_activate_dev(pdev) < 0) goto out_detach; if (!pnp_port_valid(pdev, 0) || !pnp_port_valid(pdev, 1)) goto out_disable; if (!pnp_irq_valid(pdev, 0)) goto out_disable; serial_number = pdev->card->serial; ioaddr[0] = pnp_port_start(pdev, 0); ioaddr[1] = pnp_port_start(pdev, 0); irq = pnp_irq(pdev, 0); if (!request_region(ioaddr[0], 16, "sb1000")) goto out_disable; if (!request_region(ioaddr[1], 16, "sb1000")) goto out_release_region0; dev = alloc_etherdev(sizeof(struct sb1000_private)); if (!dev) { error = -ENOMEM; goto out_release_regions; } dev->base_addr = ioaddr[0]; /* mem_start holds the second I/O address */ dev->mem_start = ioaddr[1]; dev->irq = irq; if (sb1000_debug > 0) printk(KERN_NOTICE "%s: sb1000 at (%#3.3lx,%#3.3lx), " "S/N %#8.8x, IRQ %d.\n", dev->name, dev->base_addr, dev->mem_start, serial_number, dev->irq); /* * The SB1000 is an rx-only cable modem device. The uplink is a modem * and we do not want to arp on it. 
*/ dev->flags = IFF_POINTOPOINT|IFF_NOARP; SET_NETDEV_DEV(dev, &pdev->dev); if (sb1000_debug > 0) printk(KERN_NOTICE "%s", version); dev->netdev_ops = &sb1000_netdev_ops; /* hardware address is 0:0:serial_number */ addr[0] = 0; addr[1] = 0; addr[2] = serial_number >> 24 & 0xff; addr[3] = serial_number >> 16 & 0xff; addr[4] = serial_number >> 8 & 0xff; addr[5] = serial_number >> 0 & 0xff; eth_hw_addr_set(dev, addr); pnp_set_drvdata(pdev, dev); error = register_netdev(dev); if (error) goto out_free_netdev; return 0; out_free_netdev: free_netdev(dev); out_release_regions: release_region(ioaddr[1], 16); out_release_region0: release_region(ioaddr[0], 16); out_disable: pnp_disable_dev(pdev); out_detach: pnp_device_detach(pdev); return error; } static void sb1000_remove_one(struct pnp_dev *pdev) { struct net_device *dev = pnp_get_drvdata(pdev); unregister_netdev(dev); release_region(dev->base_addr, 16); release_region(dev->mem_start, 16); free_netdev(dev); } static struct pnp_driver sb1000_driver = { .name = "sb1000", .id_table = sb1000_pnp_ids, .probe = sb1000_probe_one, .remove = sb1000_remove_one, }; /* * SB1000 hardware routines to be used during open/configuration phases */ static const int TimeOutJiffies = (875 * HZ) / 100; /* Card Wait For Busy Clear (cannot be used during an interrupt) */ static int card_wait_for_busy_clear(const int ioaddr[], const char* name) { unsigned char a; unsigned long timeout; a = inb(ioaddr[0] + 7); timeout = jiffies + TimeOutJiffies; while (a & 0x80 || a & 0x40) { /* a little sleep */ yield(); a = inb(ioaddr[0] + 7); if (time_after_eq(jiffies, timeout)) { printk(KERN_WARNING "%s: card_wait_for_busy_clear timeout\n", name); return -ETIME; } } return 0; } /* Card Wait For Ready (cannot be used during an interrupt) */ static int card_wait_for_ready(const int ioaddr[], const char* name, unsigned char in[]) { unsigned char a; unsigned long timeout; a = inb(ioaddr[1] + 6); timeout = jiffies + TimeOutJiffies; while (a & 0x80 || !(a & 0x40)) { /* a little sleep */ yield(); a = inb(ioaddr[1] + 6); if (time_after_eq(jiffies, timeout)) { printk(KERN_WARNING "%s: card_wait_for_ready timeout\n", name); return -ETIME; } } in[1] = inb(ioaddr[0] + 1); in[2] = inb(ioaddr[0] + 2); in[3] = inb(ioaddr[0] + 3); in[4] = inb(ioaddr[0] + 4); in[0] = inb(ioaddr[0] + 5); in[6] = inb(ioaddr[0] + 6); in[5] = inb(ioaddr[1] + 6); return 0; } /* Card Send Command (cannot be used during an interrupt) */ static int card_send_command(const int ioaddr[], const char* name, const unsigned char out[], unsigned char in[]) { int status; if ((status = card_wait_for_busy_clear(ioaddr, name))) return status; outb(0xa0, ioaddr[0] + 6); outb(out[2], ioaddr[0] + 1); outb(out[3], ioaddr[0] + 2); outb(out[4], ioaddr[0] + 3); outb(out[5], ioaddr[0] + 4); outb(out[1], ioaddr[0] + 5); outb(0xa0, ioaddr[0] + 6); outb(out[0], ioaddr[0] + 7); if (out[0] != 0x20 && out[0] != 0x30) { if ((status = card_wait_for_ready(ioaddr, name, in))) return status; inb(ioaddr[0] + 7); if (sb1000_debug > 3) printk(KERN_DEBUG "%s: card_send_command " "out: %02x%02x%02x%02x%02x%02x " "in: %02x%02x%02x%02x%02x%02x%02x\n", name, out[0], out[1], out[2], out[3], out[4], out[5], in[0], in[1], in[2], in[3], in[4], in[5], in[6]); } else { if (sb1000_debug > 3) printk(KERN_DEBUG "%s: card_send_command " "out: %02x%02x%02x%02x%02x%02x\n", name, out[0], out[1], out[2], out[3], out[4], out[5]); } if (out[1] != 0x1b) { if (out[0] >= 0x80 && in[0] != (out[1] | 0x80)) return -EIO; } return 0; } /* * SB1000 hardware routines to be used during 
frame rx interrupt */ static const int Sb1000TimeOutJiffies = 7 * HZ; /* Card Wait For Ready (to be used during frame rx) */ static int sb1000_wait_for_ready(const int ioaddr[], const char* name) { unsigned long timeout; timeout = jiffies + Sb1000TimeOutJiffies; while (inb(ioaddr[1] + 6) & 0x80) { if (time_after_eq(jiffies, timeout)) { printk(KERN_WARNING "%s: sb1000_wait_for_ready timeout\n", name); return -ETIME; } } timeout = jiffies + Sb1000TimeOutJiffies; while (!(inb(ioaddr[1] + 6) & 0x40)) { if (time_after_eq(jiffies, timeout)) { printk(KERN_WARNING "%s: sb1000_wait_for_ready timeout\n", name); return -ETIME; } } inb(ioaddr[0] + 7); return 0; } /* Card Wait For Ready Clear (to be used during frame rx) */ static int sb1000_wait_for_ready_clear(const int ioaddr[], const char* name) { unsigned long timeout; timeout = jiffies + Sb1000TimeOutJiffies; while (inb(ioaddr[1] + 6) & 0x80) { if (time_after_eq(jiffies, timeout)) { printk(KERN_WARNING "%s: sb1000_wait_for_ready_clear timeout\n", name); return -ETIME; } } timeout = jiffies + Sb1000TimeOutJiffies; while (inb(ioaddr[1] + 6) & 0x40) { if (time_after_eq(jiffies, timeout)) { printk(KERN_WARNING "%s: sb1000_wait_for_ready_clear timeout\n", name); return -ETIME; } } return 0; } /* Card Send Command (to be used during frame rx) */ static void sb1000_send_command(const int ioaddr[], const char* name, const unsigned char out[]) { outb(out[2], ioaddr[0] + 1); outb(out[3], ioaddr[0] + 2); outb(out[4], ioaddr[0] + 3); outb(out[5], ioaddr[0] + 4); outb(out[1], ioaddr[0] + 5); outb(out[0], ioaddr[0] + 7); if (sb1000_debug > 3) printk(KERN_DEBUG "%s: sb1000_send_command out: %02x%02x%02x%02x" "%02x%02x\n", name, out[0], out[1], out[2], out[3], out[4], out[5]); } /* Card Read Status (to be used during frame rx) */ static void sb1000_read_status(const int ioaddr[], unsigned char in[]) { in[1] = inb(ioaddr[0] + 1); in[2] = inb(ioaddr[0] + 2); in[3] = inb(ioaddr[0] + 3); in[4] = inb(ioaddr[0] + 4); in[0] = inb(ioaddr[0] + 5); } /* Issue Read Command (to be used during frame rx) */ static void sb1000_issue_read_command(const int ioaddr[], const char* name) { static const unsigned char Command0[6] = {0x20, 0x00, 0x00, 0x01, 0x00, 0x00}; sb1000_wait_for_ready_clear(ioaddr, name); outb(0xa0, ioaddr[0] + 6); sb1000_send_command(ioaddr, name, Command0); } /* * SB1000 commands for open/configuration */ /* reset SB1000 card */ static int sb1000_reset(const int ioaddr[], const char* name) { static const unsigned char Command0[6] = {0x80, 0x16, 0x00, 0x00, 0x00, 0x00}; unsigned char st[7]; int port, status; port = ioaddr[1] + 6; outb(0x4, port); inb(port); udelay(1000); outb(0x0, port); inb(port); ssleep(1); outb(0x4, port); inb(port); udelay(1000); outb(0x0, port); inb(port); udelay(0); if ((status = card_send_command(ioaddr, name, Command0, st))) return status; if (st[3] != 0xf0) return -EIO; return 0; } /* check SB1000 firmware CRC */ static int sb1000_check_CRC(const int ioaddr[], const char* name) { static const unsigned char Command0[6] = {0x80, 0x1f, 0x00, 0x00, 0x00, 0x00}; unsigned char st[7]; int status; /* check CRC */ if ((status = card_send_command(ioaddr, name, Command0, st))) return status; if (st[1] != st[3] || st[2] != st[4]) return -EIO; return 0; } static inline int sb1000_start_get_set_command(const int ioaddr[], const char* name) { static const unsigned char Command0[6] = {0x80, 0x1b, 0x00, 0x00, 0x00, 0x00}; unsigned char st[7]; return card_send_command(ioaddr, name, Command0, st); } static int sb1000_end_get_set_command(const int 
ioaddr[], const char* name) { static const unsigned char Command0[6] = {0x80, 0x1b, 0x02, 0x00, 0x00, 0x00}; static const unsigned char Command1[6] = {0x20, 0x00, 0x00, 0x00, 0x00, 0x00}; unsigned char st[7]; int status; if ((status = card_send_command(ioaddr, name, Command0, st))) return status; return card_send_command(ioaddr, name, Command1, st); } static int sb1000_activate(const int ioaddr[], const char* name) { static const unsigned char Command0[6] = {0x80, 0x11, 0x00, 0x00, 0x00, 0x00}; static const unsigned char Command1[6] = {0x80, 0x16, 0x00, 0x00, 0x00, 0x00}; unsigned char st[7]; int status; ssleep(1); status = card_send_command(ioaddr, name, Command0, st); if (status) return status; status = card_send_command(ioaddr, name, Command1, st); if (status) return status; if (st[3] != 0xf1) { status = sb1000_start_get_set_command(ioaddr, name); if (status) return status; return -EIO; } udelay(1000); return sb1000_start_get_set_command(ioaddr, name); } /* get SB1000 firmware version */ static int sb1000_get_firmware_version(const int ioaddr[], const char* name, unsigned char version[], int do_end) { static const unsigned char Command0[6] = {0x80, 0x23, 0x00, 0x00, 0x00, 0x00}; unsigned char st[7]; int status; if ((status = sb1000_start_get_set_command(ioaddr, name))) return status; if ((status = card_send_command(ioaddr, name, Command0, st))) return status; if (st[0] != 0xa3) return -EIO; version[0] = st[1]; version[1] = st[2]; if (do_end) return sb1000_end_get_set_command(ioaddr, name); else return 0; } /* get SB1000 frequency */ static int sb1000_get_frequency(const int ioaddr[], const char* name, int* frequency) { static const unsigned char Command0[6] = {0x80, 0x44, 0x00, 0x00, 0x00, 0x00}; unsigned char st[7]; int status; udelay(1000); if ((status = sb1000_start_get_set_command(ioaddr, name))) return status; if ((status = card_send_command(ioaddr, name, Command0, st))) return status; *frequency = ((st[1] << 8 | st[2]) << 8 | st[3]) << 8 | st[4]; return sb1000_end_get_set_command(ioaddr, name); } /* set SB1000 frequency */ static int sb1000_set_frequency(const int ioaddr[], const char* name, int frequency) { unsigned char st[7]; int status; unsigned char Command0[6] = {0x80, 0x29, 0x00, 0x00, 0x00, 0x00}; const int FrequencyLowerLimit = 57000; const int FrequencyUpperLimit = 804000; if (frequency < FrequencyLowerLimit || frequency > FrequencyUpperLimit) { printk(KERN_ERR "%s: frequency chosen (%d kHz) is not in the range " "[%d,%d] kHz\n", name, frequency, FrequencyLowerLimit, FrequencyUpperLimit); return -EINVAL; } udelay(1000); if ((status = sb1000_start_get_set_command(ioaddr, name))) return status; Command0[5] = frequency & 0xff; frequency >>= 8; Command0[4] = frequency & 0xff; frequency >>= 8; Command0[3] = frequency & 0xff; frequency >>= 8; Command0[2] = frequency & 0xff; return card_send_command(ioaddr, name, Command0, st); } /* get SB1000 PIDs */ static int sb1000_get_PIDs(const int ioaddr[], const char* name, short PID[]) { static const unsigned char Command0[6] = {0x80, 0x40, 0x00, 0x00, 0x00, 0x00}; static const unsigned char Command1[6] = {0x80, 0x41, 0x00, 0x00, 0x00, 0x00}; static const unsigned char Command2[6] = {0x80, 0x42, 0x00, 0x00, 0x00, 0x00}; static const unsigned char Command3[6] = {0x80, 0x43, 0x00, 0x00, 0x00, 0x00}; unsigned char st[7]; int status; udelay(1000); if ((status = sb1000_start_get_set_command(ioaddr, name))) return status; if ((status = card_send_command(ioaddr, name, Command0, st))) return status; PID[0] = st[1] << 8 | st[2]; if ((status = 
card_send_command(ioaddr, name, Command1, st))) return status; PID[1] = st[1] << 8 | st[2]; if ((status = card_send_command(ioaddr, name, Command2, st))) return status; PID[2] = st[1] << 8 | st[2]; if ((status = card_send_command(ioaddr, name, Command3, st))) return status; PID[3] = st[1] << 8 | st[2]; return sb1000_end_get_set_command(ioaddr, name); } /* set SB1000 PIDs */ static int sb1000_set_PIDs(const int ioaddr[], const char* name, const short PID[]) { static const unsigned char Command4[6] = {0x80, 0x2e, 0x00, 0x00, 0x00, 0x00}; unsigned char st[7]; short p; int status; unsigned char Command0[6] = {0x80, 0x31, 0x00, 0x00, 0x00, 0x00}; unsigned char Command1[6] = {0x80, 0x32, 0x00, 0x00, 0x00, 0x00}; unsigned char Command2[6] = {0x80, 0x33, 0x00, 0x00, 0x00, 0x00}; unsigned char Command3[6] = {0x80, 0x34, 0x00, 0x00, 0x00, 0x00}; udelay(1000); if ((status = sb1000_start_get_set_command(ioaddr, name))) return status; p = PID[0]; Command0[3] = p & 0xff; p >>= 8; Command0[2] = p & 0xff; if ((status = card_send_command(ioaddr, name, Command0, st))) return status; p = PID[1]; Command1[3] = p & 0xff; p >>= 8; Command1[2] = p & 0xff; if ((status = card_send_command(ioaddr, name, Command1, st))) return status; p = PID[2]; Command2[3] = p & 0xff; p >>= 8; Command2[2] = p & 0xff; if ((status = card_send_command(ioaddr, name, Command2, st))) return status; p = PID[3]; Command3[3] = p & 0xff; p >>= 8; Command3[2] = p & 0xff; if ((status = card_send_command(ioaddr, name, Command3, st))) return status; if ((status = card_send_command(ioaddr, name, Command4, st))) return status; return sb1000_end_get_set_command(ioaddr, name); } static void sb1000_print_status_buffer(const char* name, unsigned char st[], unsigned char buffer[], int size) { int i, j, k; printk(KERN_DEBUG "%s: status: %02x %02x\n", name, st[0], st[1]); if (buffer[24] == 0x08 && buffer[25] == 0x00 && buffer[26] == 0x45) { printk(KERN_DEBUG "%s: length: %d protocol: %d from: %d.%d.%d.%d:%d " "to %d.%d.%d.%d:%d\n", name, buffer[28] << 8 | buffer[29], buffer[35], buffer[38], buffer[39], buffer[40], buffer[41], buffer[46] << 8 | buffer[47], buffer[42], buffer[43], buffer[44], buffer[45], buffer[48] << 8 | buffer[49]); } else { for (i = 0, k = 0; i < (size + 7) / 8; i++) { printk(KERN_DEBUG "%s: %s", name, i ? 
" " : "buffer:"); for (j = 0; j < 8 && k < size; j++, k++) printk(" %02x", buffer[k]); printk("\n"); } } } /* * SB1000 commands for frame rx interrupt */ /* receive a single frame and assemble datagram * (this is the heart of the interrupt routine) */ static int sb1000_rx(struct net_device *dev) { #define FRAMESIZE 184 unsigned char st[2], buffer[FRAMESIZE], session_id, frame_id; short dlen; int ioaddr, ns; unsigned int skbsize; struct sk_buff *skb; struct sb1000_private *lp = netdev_priv(dev); struct net_device_stats *stats = &dev->stats; /* SB1000 frame constants */ const int FrameSize = FRAMESIZE; const int NewDatagramHeaderSkip = 8; const int NewDatagramHeaderSize = NewDatagramHeaderSkip + 18; const int NewDatagramDataSize = FrameSize - NewDatagramHeaderSize; const int ContDatagramHeaderSkip = 7; const int ContDatagramHeaderSize = ContDatagramHeaderSkip + 1; const int ContDatagramDataSize = FrameSize - ContDatagramHeaderSize; const int TrailerSize = 4; ioaddr = dev->base_addr; insw(ioaddr, (unsigned short*) st, 1); #ifdef XXXDEBUG printk("cm0: received: %02x %02x\n", st[0], st[1]); #endif /* XXXDEBUG */ lp->rx_frames++; /* decide if it is a good or bad frame */ for (ns = 0; ns < NPIDS; ns++) { session_id = lp->rx_session_id[ns]; frame_id = lp->rx_frame_id[ns]; if (st[0] == session_id) { if (st[1] == frame_id || (!frame_id && (st[1] & 0xf0) == 0x30)) { goto good_frame; } else if ((st[1] & 0xf0) == 0x30 && (st[0] & 0x40)) { goto skipped_frame; } else { goto bad_frame; } } else if (st[0] == (session_id | 0x40)) { if ((st[1] & 0xf0) == 0x30) { goto skipped_frame; } else { goto bad_frame; } } } goto bad_frame; skipped_frame: stats->rx_frame_errors++; skb = lp->rx_skb[ns]; if (sb1000_debug > 1) printk(KERN_WARNING "%s: missing frame(s): got %02x %02x " "expecting %02x %02x\n", dev->name, st[0], st[1], skb ? 
session_id : session_id | 0x40, frame_id); if (skb) { dev_kfree_skb(skb); skb = NULL; } good_frame: lp->rx_frame_id[ns] = 0x30 | ((st[1] + 1) & 0x0f); /* new datagram */ if (st[0] & 0x40) { /* get data length */ insw(ioaddr, buffer, NewDatagramHeaderSize / 2); #ifdef XXXDEBUG printk("cm0: IP identification: %02x%02x fragment offset: %02x%02x\n", buffer[30], buffer[31], buffer[32], buffer[33]); #endif /* XXXDEBUG */ if (buffer[0] != NewDatagramHeaderSkip) { if (sb1000_debug > 1) printk(KERN_WARNING "%s: new datagram header skip error: " "got %02x expecting %02x\n", dev->name, buffer[0], NewDatagramHeaderSkip); stats->rx_length_errors++; insw(ioaddr, buffer, NewDatagramDataSize / 2); goto bad_frame_next; } dlen = ((buffer[NewDatagramHeaderSkip + 3] & 0x0f) << 8 | buffer[NewDatagramHeaderSkip + 4]) - 17; if (dlen > SB1000_MRU) { if (sb1000_debug > 1) printk(KERN_WARNING "%s: datagram length (%d) greater " "than MRU (%d)\n", dev->name, dlen, SB1000_MRU); stats->rx_length_errors++; insw(ioaddr, buffer, NewDatagramDataSize / 2); goto bad_frame_next; } lp->rx_dlen[ns] = dlen; /* compute size to allocate for datagram */ skbsize = dlen + FrameSize; if ((skb = alloc_skb(skbsize, GFP_ATOMIC)) == NULL) { if (sb1000_debug > 1) printk(KERN_WARNING "%s: can't allocate %d bytes long " "skbuff\n", dev->name, skbsize); stats->rx_dropped++; insw(ioaddr, buffer, NewDatagramDataSize / 2); goto dropped_frame; } skb->dev = dev; skb_reset_mac_header(skb); skb->protocol = (unsigned short) buffer[NewDatagramHeaderSkip + 16]; insw(ioaddr, skb_put(skb, NewDatagramDataSize), NewDatagramDataSize / 2); lp->rx_skb[ns] = skb; } else { /* continuation of previous datagram */ insw(ioaddr, buffer, ContDatagramHeaderSize / 2); if (buffer[0] != ContDatagramHeaderSkip) { if (sb1000_debug > 1) printk(KERN_WARNING "%s: cont datagram header skip error: " "got %02x expecting %02x\n", dev->name, buffer[0], ContDatagramHeaderSkip); stats->rx_length_errors++; insw(ioaddr, buffer, ContDatagramDataSize / 2); goto bad_frame_next; } skb = lp->rx_skb[ns]; insw(ioaddr, skb_put(skb, ContDatagramDataSize), ContDatagramDataSize / 2); dlen = lp->rx_dlen[ns]; } if (skb->len < dlen + TrailerSize) { lp->rx_session_id[ns] &= ~0x40; return 0; } /* datagram completed: send to upper level */ skb_trim(skb, dlen); __netif_rx(skb); stats->rx_bytes+=dlen; stats->rx_packets++; lp->rx_skb[ns] = NULL; lp->rx_session_id[ns] |= 0x40; return 0; bad_frame: insw(ioaddr, buffer, FrameSize / 2); if (sb1000_debug > 1) printk(KERN_WARNING "%s: frame error: got %02x %02x\n", dev->name, st[0], st[1]); stats->rx_frame_errors++; bad_frame_next: if (sb1000_debug > 2) sb1000_print_status_buffer(dev->name, st, buffer, FrameSize); dropped_frame: stats->rx_errors++; if (ns < NPIDS) { if ((skb = lp->rx_skb[ns])) { dev_kfree_skb(skb); lp->rx_skb[ns] = NULL; } lp->rx_session_id[ns] |= 0x40; } return -1; } static void sb1000_error_dpc(struct net_device *dev) { static const unsigned char Command0[6] = {0x80, 0x26, 0x00, 0x00, 0x00, 0x00}; char *name; unsigned char st[5]; int ioaddr[2]; struct sb1000_private *lp = netdev_priv(dev); const int ErrorDpcCounterInitialize = 200; ioaddr[0] = dev->base_addr; /* mem_start holds the second I/O address */ ioaddr[1] = dev->mem_start; name = dev->name; sb1000_wait_for_ready_clear(ioaddr, name); sb1000_send_command(ioaddr, name, Command0); sb1000_wait_for_ready(ioaddr, name); sb1000_read_status(ioaddr, st); if (st[1] & 0x10) lp->rx_error_dpc_count = ErrorDpcCounterInitialize; } /* * Linux interface functions */ static int sb1000_open(struct 
net_device *dev) { char *name; int ioaddr[2], status; struct sb1000_private *lp = netdev_priv(dev); const unsigned short FirmwareVersion[] = {0x01, 0x01}; ioaddr[0] = dev->base_addr; /* mem_start holds the second I/O address */ ioaddr[1] = dev->mem_start; name = dev->name; /* initialize sb1000 */ if ((status = sb1000_reset(ioaddr, name))) return status; ssleep(1); if ((status = sb1000_check_CRC(ioaddr, name))) return status; /* initialize private data before board can catch interrupts */ lp->rx_skb[0] = NULL; lp->rx_skb[1] = NULL; lp->rx_skb[2] = NULL; lp->rx_skb[3] = NULL; lp->rx_dlen[0] = 0; lp->rx_dlen[1] = 0; lp->rx_dlen[2] = 0; lp->rx_dlen[3] = 0; lp->rx_frames = 0; lp->rx_error_count = 0; lp->rx_error_dpc_count = 0; lp->rx_session_id[0] = 0x50; lp->rx_session_id[1] = 0x48; lp->rx_session_id[2] = 0x44; lp->rx_session_id[3] = 0x42; lp->rx_frame_id[0] = 0; lp->rx_frame_id[1] = 0; lp->rx_frame_id[2] = 0; lp->rx_frame_id[3] = 0; if (request_irq(dev->irq, sb1000_interrupt, 0, "sb1000", dev)) { return -EAGAIN; } if (sb1000_debug > 2) printk(KERN_DEBUG "%s: Opening, IRQ %d\n", name, dev->irq); /* Activate board and check firmware version */ udelay(1000); if ((status = sb1000_activate(ioaddr, name))) return status; udelay(0); if ((status = sb1000_get_firmware_version(ioaddr, name, version, 0))) return status; if (version[0] != FirmwareVersion[0] || version[1] != FirmwareVersion[1]) printk(KERN_WARNING "%s: found firmware version %x.%02x " "(should be %x.%02x)\n", name, version[0], version[1], FirmwareVersion[0], FirmwareVersion[1]); netif_start_queue(dev); return 0; /* Always succeed */ } static int sb1000_siocdevprivate(struct net_device *dev, struct ifreq *ifr, void __user *data, int cmd) { char* name; unsigned char version[2]; short PID[4]; int ioaddr[2], status, frequency; unsigned int stats[5]; struct sb1000_private *lp = netdev_priv(dev); if (!(dev && dev->flags & IFF_UP)) return -ENODEV; ioaddr[0] = dev->base_addr; /* mem_start holds the second I/O address */ ioaddr[1] = dev->mem_start; name = dev->name; switch (cmd) { case SIOCGCMSTATS: /* get statistics */ stats[0] = dev->stats.rx_bytes; stats[1] = lp->rx_frames; stats[2] = dev->stats.rx_packets; stats[3] = dev->stats.rx_errors; stats[4] = dev->stats.rx_dropped; if (copy_to_user(data, stats, sizeof(stats))) return -EFAULT; status = 0; break; case SIOCGCMFIRMWARE: /* get firmware version */ if ((status = sb1000_get_firmware_version(ioaddr, name, version, 1))) return status; if (copy_to_user(data, version, sizeof(version))) return -EFAULT; break; case SIOCGCMFREQUENCY: /* get frequency */ if ((status = sb1000_get_frequency(ioaddr, name, &frequency))) return status; if (put_user(frequency, (int __user *)data)) return -EFAULT; break; case SIOCSCMFREQUENCY: /* set frequency */ if (!capable(CAP_NET_ADMIN)) return -EPERM; if (get_user(frequency, (int __user *)data)) return -EFAULT; if ((status = sb1000_set_frequency(ioaddr, name, frequency))) return status; break; case SIOCGCMPIDS: /* get PIDs */ if ((status = sb1000_get_PIDs(ioaddr, name, PID))) return status; if (copy_to_user(data, PID, sizeof(PID))) return -EFAULT; break; case SIOCSCMPIDS: /* set PIDs */ if (!capable(CAP_NET_ADMIN)) return -EPERM; if (copy_from_user(PID, data, sizeof(PID))) return -EFAULT; if ((status = sb1000_set_PIDs(ioaddr, name, PID))) return status; /* set session_id, frame_id and pkt_type too */ lp->rx_session_id[0] = 0x50 | (PID[0] & 0x0f); lp->rx_session_id[1] = 0x48; lp->rx_session_id[2] = 0x44; lp->rx_session_id[3] = 0x42; lp->rx_frame_id[0] = 0; 
lp->rx_frame_id[1] = 0; lp->rx_frame_id[2] = 0; lp->rx_frame_id[3] = 0; break; default: status = -EINVAL; break; } return status; } /* transmit function: do nothing since SB1000 can't send anything out */ static netdev_tx_t sb1000_start_xmit(struct sk_buff *skb, struct net_device *dev) { printk(KERN_WARNING "%s: trying to transmit!!!\n", dev->name); /* sb1000 can't xmit datagrams */ dev_kfree_skb(skb); return NETDEV_TX_OK; } /* SB1000 interrupt handler. */ static irqreturn_t sb1000_interrupt(int irq, void *dev_id) { static const unsigned char Command0[6] = {0x80, 0x2c, 0x00, 0x00, 0x00, 0x00}; static const unsigned char Command1[6] = {0x80, 0x2e, 0x00, 0x00, 0x00, 0x00}; char *name; unsigned char st; int ioaddr[2]; struct net_device *dev = dev_id; struct sb1000_private *lp = netdev_priv(dev); const int MaxRxErrorCount = 6; ioaddr[0] = dev->base_addr; /* mem_start holds the second I/O address */ ioaddr[1] = dev->mem_start; name = dev->name; /* is it a good interrupt? */ st = inb(ioaddr[1] + 6); if (!(st & 0x08 && st & 0x20)) { return IRQ_NONE; } if (sb1000_debug > 3) printk(KERN_DEBUG "%s: entering interrupt\n", dev->name); st = inb(ioaddr[0] + 7); if (sb1000_rx(dev)) lp->rx_error_count++; #ifdef SB1000_DELAY udelay(SB1000_DELAY); #endif /* SB1000_DELAY */ sb1000_issue_read_command(ioaddr, name); if (st & 0x01) { sb1000_error_dpc(dev); sb1000_issue_read_command(ioaddr, name); } if (lp->rx_error_dpc_count && !(--lp->rx_error_dpc_count)) { sb1000_wait_for_ready_clear(ioaddr, name); sb1000_send_command(ioaddr, name, Command0); sb1000_wait_for_ready(ioaddr, name); sb1000_issue_read_command(ioaddr, name); } if (lp->rx_error_count >= MaxRxErrorCount) { sb1000_wait_for_ready_clear(ioaddr, name); sb1000_send_command(ioaddr, name, Command1); sb1000_wait_for_ready(ioaddr, name); sb1000_issue_read_command(ioaddr, name); lp->rx_error_count = 0; } return IRQ_HANDLED; } static int sb1000_close(struct net_device *dev) { int i; int ioaddr[2]; struct sb1000_private *lp = netdev_priv(dev); if (sb1000_debug > 2) printk(KERN_DEBUG "%s: Shutting down sb1000.\n", dev->name); netif_stop_queue(dev); ioaddr[0] = dev->base_addr; /* mem_start holds the second I/O address */ ioaddr[1] = dev->mem_start; free_irq(dev->irq, dev); /* If we don't do this, we can't re-insmod it later. */ release_region(ioaddr[1], SB1000_IO_EXTENT); release_region(ioaddr[0], SB1000_IO_EXTENT); /* free rx_skb's if needed */ for (i=0; i<4; i++) { if (lp->rx_skb[i]) { dev_kfree_skb(lp->rx_skb[i]); } } return 0; } MODULE_AUTHOR("Franco Venturi <[email protected]>"); MODULE_DESCRIPTION("General Instruments SB1000 driver"); MODULE_LICENSE("GPL"); module_pnp_driver(sb1000_driver);
linux-master
drivers/net/sb1000.c
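The sb1000_rx() routine in the record above classifies each incoming 184-byte frame by matching the two status bytes against the per-PID session/frame IDs before deciding whether it starts a new datagram, is a "missing frame(s)" resynchronisation, or is bad. The following is a minimal user-space sketch of just that classification step so the state machine can be exercised without the ISA hardware; struct sb1000_pid_state, classify_frame and the sample status bytes are illustrative assumptions, not symbols from the driver, and the driver itself tries each of its NPIDS states in turn rather than a single one as here.

/* Hypothetical, self-contained re-creation of the good/skipped/bad decision
 * made on the two status bytes in sb1000_rx(); names are illustrative. */
#include <stdio.h>

enum frame_kind { FRAME_GOOD, FRAME_SKIPPED, FRAME_BAD };

struct sb1000_pid_state {
	unsigned char session_id;	/* e.g. 0x50/0x48/0x44/0x42; bit 0x40
					 * set while idle, cleared while a
					 * datagram is being reassembled */
	unsigned char frame_id;		/* 0, or 0x30 | expected sequence nibble */
};

/* Check one PID state; the driver loops over all NPIDS states and only
 * falls through to bad_frame after none of them match. */
static enum frame_kind classify_frame(const struct sb1000_pid_state *st,
				      unsigned char s0, unsigned char s1)
{
	if (s0 == st->session_id) {
		if (s1 == st->frame_id ||
		    (!st->frame_id && (s1 & 0xf0) == 0x30))
			return FRAME_GOOD;
		if ((s1 & 0xf0) == 0x30 && (s0 & 0x40))
			return FRAME_SKIPPED;
		return FRAME_BAD;
	}
	if (s0 == (st->session_id | 0x40))
		return ((s1 & 0xf0) == 0x30) ? FRAME_SKIPPED : FRAME_BAD;
	return FRAME_BAD;
}

int main(void)
{
	/* idle session: the first frame of a new datagram is accepted */
	struct sb1000_pid_state idle = { .session_id = 0x50, .frame_id = 0 };
	printf("idle   0x50 0x31 -> %d (expect GOOD=0)\n",
	       classify_frame(&idle, 0x50, 0x31));

	/* reassembly in progress (0x40 cleared): a new-datagram frame means
	 * the continuation frames were lost, i.e. "missing frame(s)" */
	struct sb1000_pid_state busy = { .session_id = 0x10, .frame_id = 0x33 };
	printf("busy   0x50 0x34 -> %d (expect SKIPPED=1)\n",
	       classify_frame(&busy, 0x50, 0x34));
	return 0;
}

In the driver, FRAME_GOOD leads into the new/continuation datagram assembly, FRAME_SKIPPED frees any partially assembled skb and counts rx_frame_errors, and FRAME_BAD drains the frame from the I/O port and counts it as an rx error.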
/* mii.c: MII interface library Maintained by Jeff Garzik <[email protected]> Copyright 2001,2002 Jeff Garzik Various code came from myson803.c and other files by Donald Becker. Copyright: Written 1998-2002 by Donald Becker. This software may be used and distributed according to the terms of the GNU General Public License (GPL), incorporated herein by reference. Drivers based on or derived from this code fall under the GPL and must retain the authorship, copyright and license notice. This file is not a complete program and may only be used when the entire operating system is licensed under the GPL. The author may be reached as [email protected], or C/O Scyld Computing Corporation 410 Severn Ave., Suite 210 Annapolis MD 21403 */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/netdevice.h> #include <linux/ethtool.h> #include <linux/mii.h> static u32 mii_get_an(struct mii_if_info *mii, u16 addr) { int advert; advert = mii->mdio_read(mii->dev, mii->phy_id, addr); return mii_lpa_to_ethtool_lpa_t(advert); } /** * mii_ethtool_gset - get settings that are specified in @ecmd * @mii: MII interface * @ecmd: requested ethtool_cmd * * The @ecmd parameter is expected to have been cleared before calling * mii_ethtool_gset(). */ void mii_ethtool_gset(struct mii_if_info *mii, struct ethtool_cmd *ecmd) { struct net_device *dev = mii->dev; u16 bmcr, bmsr, ctrl1000 = 0, stat1000 = 0; u32 nego; ecmd->supported = (SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full | SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full | SUPPORTED_Autoneg | SUPPORTED_TP | SUPPORTED_MII); if (mii->supports_gmii) ecmd->supported |= SUPPORTED_1000baseT_Half | SUPPORTED_1000baseT_Full; /* only supports twisted-pair */ ecmd->port = PORT_MII; /* only supports internal transceiver */ ecmd->transceiver = XCVR_INTERNAL; /* this isn't fully supported at higher layers */ ecmd->phy_address = mii->phy_id; ecmd->mdio_support = ETH_MDIO_SUPPORTS_C22; ecmd->advertising = ADVERTISED_TP | ADVERTISED_MII; bmcr = mii->mdio_read(dev, mii->phy_id, MII_BMCR); bmsr = mii->mdio_read(dev, mii->phy_id, MII_BMSR); if (mii->supports_gmii) { ctrl1000 = mii->mdio_read(dev, mii->phy_id, MII_CTRL1000); stat1000 = mii->mdio_read(dev, mii->phy_id, MII_STAT1000); } ecmd->advertising |= mii_get_an(mii, MII_ADVERTISE); if (mii->supports_gmii) ecmd->advertising |= mii_ctrl1000_to_ethtool_adv_t(ctrl1000); if (bmcr & BMCR_ANENABLE) { ecmd->advertising |= ADVERTISED_Autoneg; ecmd->autoneg = AUTONEG_ENABLE; if (bmsr & BMSR_ANEGCOMPLETE) { ecmd->lp_advertising = mii_get_an(mii, MII_LPA); ecmd->lp_advertising |= mii_stat1000_to_ethtool_lpa_t(stat1000); } else { ecmd->lp_advertising = 0; } nego = ecmd->advertising & ecmd->lp_advertising; if (nego & (ADVERTISED_1000baseT_Full | ADVERTISED_1000baseT_Half)) { ethtool_cmd_speed_set(ecmd, SPEED_1000); ecmd->duplex = !!(nego & ADVERTISED_1000baseT_Full); } else if (nego & (ADVERTISED_100baseT_Full | ADVERTISED_100baseT_Half)) { ethtool_cmd_speed_set(ecmd, SPEED_100); ecmd->duplex = !!(nego & ADVERTISED_100baseT_Full); } else { ethtool_cmd_speed_set(ecmd, SPEED_10); ecmd->duplex = !!(nego & ADVERTISED_10baseT_Full); } } else { ecmd->autoneg = AUTONEG_DISABLE; ethtool_cmd_speed_set(ecmd, ((bmcr & BMCR_SPEED1000 && (bmcr & BMCR_SPEED100) == 0) ? SPEED_1000 : ((bmcr & BMCR_SPEED100) ? SPEED_100 : SPEED_10))); ecmd->duplex = (bmcr & BMCR_FULLDPLX) ? 
DUPLEX_FULL : DUPLEX_HALF; } mii->full_duplex = ecmd->duplex; /* ignore maxtxpkt, maxrxpkt for now */ } /** * mii_ethtool_get_link_ksettings - get settings that are specified in @cmd * @mii: MII interface * @cmd: requested ethtool_link_ksettings * * The @cmd parameter is expected to have been cleared before calling * mii_ethtool_get_link_ksettings(). */ void mii_ethtool_get_link_ksettings(struct mii_if_info *mii, struct ethtool_link_ksettings *cmd) { struct net_device *dev = mii->dev; u16 bmcr, bmsr, ctrl1000 = 0, stat1000 = 0; u32 nego, supported, advertising, lp_advertising; supported = (SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full | SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full | SUPPORTED_Autoneg | SUPPORTED_TP | SUPPORTED_MII); if (mii->supports_gmii) supported |= SUPPORTED_1000baseT_Half | SUPPORTED_1000baseT_Full; /* only supports twisted-pair */ cmd->base.port = PORT_MII; /* this isn't fully supported at higher layers */ cmd->base.phy_address = mii->phy_id; cmd->base.mdio_support = ETH_MDIO_SUPPORTS_C22; advertising = ADVERTISED_TP | ADVERTISED_MII; bmcr = mii->mdio_read(dev, mii->phy_id, MII_BMCR); bmsr = mii->mdio_read(dev, mii->phy_id, MII_BMSR); if (mii->supports_gmii) { ctrl1000 = mii->mdio_read(dev, mii->phy_id, MII_CTRL1000); stat1000 = mii->mdio_read(dev, mii->phy_id, MII_STAT1000); } advertising |= mii_get_an(mii, MII_ADVERTISE); if (mii->supports_gmii) advertising |= mii_ctrl1000_to_ethtool_adv_t(ctrl1000); if (bmcr & BMCR_ANENABLE) { advertising |= ADVERTISED_Autoneg; cmd->base.autoneg = AUTONEG_ENABLE; if (bmsr & BMSR_ANEGCOMPLETE) { lp_advertising = mii_get_an(mii, MII_LPA); lp_advertising |= mii_stat1000_to_ethtool_lpa_t(stat1000); } else { lp_advertising = 0; } nego = advertising & lp_advertising; if (nego & (ADVERTISED_1000baseT_Full | ADVERTISED_1000baseT_Half)) { cmd->base.speed = SPEED_1000; cmd->base.duplex = !!(nego & ADVERTISED_1000baseT_Full); } else if (nego & (ADVERTISED_100baseT_Full | ADVERTISED_100baseT_Half)) { cmd->base.speed = SPEED_100; cmd->base.duplex = !!(nego & ADVERTISED_100baseT_Full); } else { cmd->base.speed = SPEED_10; cmd->base.duplex = !!(nego & ADVERTISED_10baseT_Full); } } else { cmd->base.autoneg = AUTONEG_DISABLE; cmd->base.speed = ((bmcr & BMCR_SPEED1000 && (bmcr & BMCR_SPEED100) == 0) ? SPEED_1000 : ((bmcr & BMCR_SPEED100) ? SPEED_100 : SPEED_10)); cmd->base.duplex = (bmcr & BMCR_FULLDPLX) ? DUPLEX_FULL : DUPLEX_HALF; lp_advertising = 0; } mii->full_duplex = cmd->base.duplex; ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported, supported); ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising, advertising); ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.lp_advertising, lp_advertising); /* ignore maxtxpkt, maxrxpkt for now */ } /** * mii_ethtool_sset - set settings that are specified in @ecmd * @mii: MII interface * @ecmd: requested ethtool_cmd * * Returns 0 for success, negative on error. 
*/ int mii_ethtool_sset(struct mii_if_info *mii, struct ethtool_cmd *ecmd) { struct net_device *dev = mii->dev; u32 speed = ethtool_cmd_speed(ecmd); if (speed != SPEED_10 && speed != SPEED_100 && speed != SPEED_1000) return -EINVAL; if (ecmd->duplex != DUPLEX_HALF && ecmd->duplex != DUPLEX_FULL) return -EINVAL; if (ecmd->port != PORT_MII) return -EINVAL; if (ecmd->transceiver != XCVR_INTERNAL) return -EINVAL; if (ecmd->phy_address != mii->phy_id) return -EINVAL; if (ecmd->autoneg != AUTONEG_DISABLE && ecmd->autoneg != AUTONEG_ENABLE) return -EINVAL; if ((speed == SPEED_1000) && (!mii->supports_gmii)) return -EINVAL; /* ignore supported, maxtxpkt, maxrxpkt */ if (ecmd->autoneg == AUTONEG_ENABLE) { u32 bmcr, advert, tmp; u32 advert2 = 0, tmp2 = 0; if ((ecmd->advertising & (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full | ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full | ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full)) == 0) return -EINVAL; /* advertise only what has been requested */ advert = mii->mdio_read(dev, mii->phy_id, MII_ADVERTISE); tmp = advert & ~(ADVERTISE_ALL | ADVERTISE_100BASE4); if (mii->supports_gmii) { advert2 = mii->mdio_read(dev, mii->phy_id, MII_CTRL1000); tmp2 = advert2 & ~(ADVERTISE_1000HALF | ADVERTISE_1000FULL); } tmp |= ethtool_adv_to_mii_adv_t(ecmd->advertising); if (mii->supports_gmii) tmp2 |= ethtool_adv_to_mii_ctrl1000_t(ecmd->advertising); if (advert != tmp) { mii->mdio_write(dev, mii->phy_id, MII_ADVERTISE, tmp); mii->advertising = tmp; } if ((mii->supports_gmii) && (advert2 != tmp2)) mii->mdio_write(dev, mii->phy_id, MII_CTRL1000, tmp2); /* turn on autonegotiation, and force a renegotiate */ bmcr = mii->mdio_read(dev, mii->phy_id, MII_BMCR); bmcr |= (BMCR_ANENABLE | BMCR_ANRESTART); mii->mdio_write(dev, mii->phy_id, MII_BMCR, bmcr); mii->force_media = 0; } else { u32 bmcr, tmp; /* turn off auto negotiation, set speed and duplexity */ bmcr = mii->mdio_read(dev, mii->phy_id, MII_BMCR); tmp = bmcr & ~(BMCR_ANENABLE | BMCR_SPEED100 | BMCR_SPEED1000 | BMCR_FULLDPLX); if (speed == SPEED_1000) tmp |= BMCR_SPEED1000; else if (speed == SPEED_100) tmp |= BMCR_SPEED100; if (ecmd->duplex == DUPLEX_FULL) { tmp |= BMCR_FULLDPLX; mii->full_duplex = 1; } else mii->full_duplex = 0; if (bmcr != tmp) mii->mdio_write(dev, mii->phy_id, MII_BMCR, tmp); mii->force_media = 1; } return 0; } /** * mii_ethtool_set_link_ksettings - set settings that are specified in @cmd * @mii: MII interfaces * @cmd: requested ethtool_link_ksettings * * Returns 0 for success, negative on error. 
*/ int mii_ethtool_set_link_ksettings(struct mii_if_info *mii, const struct ethtool_link_ksettings *cmd) { struct net_device *dev = mii->dev; u32 speed = cmd->base.speed; if (speed != SPEED_10 && speed != SPEED_100 && speed != SPEED_1000) return -EINVAL; if (cmd->base.duplex != DUPLEX_HALF && cmd->base.duplex != DUPLEX_FULL) return -EINVAL; if (cmd->base.port != PORT_MII) return -EINVAL; if (cmd->base.phy_address != mii->phy_id) return -EINVAL; if (cmd->base.autoneg != AUTONEG_DISABLE && cmd->base.autoneg != AUTONEG_ENABLE) return -EINVAL; if ((speed == SPEED_1000) && (!mii->supports_gmii)) return -EINVAL; /* ignore supported, maxtxpkt, maxrxpkt */ if (cmd->base.autoneg == AUTONEG_ENABLE) { u32 bmcr, advert, tmp; u32 advert2 = 0, tmp2 = 0; u32 advertising; ethtool_convert_link_mode_to_legacy_u32( &advertising, cmd->link_modes.advertising); if ((advertising & (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full | ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full | ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full)) == 0) return -EINVAL; /* advertise only what has been requested */ advert = mii->mdio_read(dev, mii->phy_id, MII_ADVERTISE); tmp = advert & ~(ADVERTISE_ALL | ADVERTISE_100BASE4); if (mii->supports_gmii) { advert2 = mii->mdio_read(dev, mii->phy_id, MII_CTRL1000); tmp2 = advert2 & ~(ADVERTISE_1000HALF | ADVERTISE_1000FULL); } tmp |= ethtool_adv_to_mii_adv_t(advertising); if (mii->supports_gmii) tmp2 |= ethtool_adv_to_mii_ctrl1000_t(advertising); if (advert != tmp) { mii->mdio_write(dev, mii->phy_id, MII_ADVERTISE, tmp); mii->advertising = tmp; } if ((mii->supports_gmii) && (advert2 != tmp2)) mii->mdio_write(dev, mii->phy_id, MII_CTRL1000, tmp2); /* turn on autonegotiation, and force a renegotiate */ bmcr = mii->mdio_read(dev, mii->phy_id, MII_BMCR); bmcr |= (BMCR_ANENABLE | BMCR_ANRESTART); mii->mdio_write(dev, mii->phy_id, MII_BMCR, bmcr); mii->force_media = 0; } else { u32 bmcr, tmp; /* turn off auto negotiation, set speed and duplexity */ bmcr = mii->mdio_read(dev, mii->phy_id, MII_BMCR); tmp = bmcr & ~(BMCR_ANENABLE | BMCR_SPEED100 | BMCR_SPEED1000 | BMCR_FULLDPLX); if (speed == SPEED_1000) tmp |= BMCR_SPEED1000; else if (speed == SPEED_100) tmp |= BMCR_SPEED100; if (cmd->base.duplex == DUPLEX_FULL) { tmp |= BMCR_FULLDPLX; mii->full_duplex = 1; } else { mii->full_duplex = 0; } if (bmcr != tmp) mii->mdio_write(dev, mii->phy_id, MII_BMCR, tmp); mii->force_media = 1; } return 0; } /** * mii_check_gmii_support - check if the MII supports Gb interfaces * @mii: the MII interface */ int mii_check_gmii_support(struct mii_if_info *mii) { int reg; reg = mii->mdio_read(mii->dev, mii->phy_id, MII_BMSR); if (reg & BMSR_ESTATEN) { reg = mii->mdio_read(mii->dev, mii->phy_id, MII_ESTATUS); if (reg & (ESTATUS_1000_TFULL | ESTATUS_1000_THALF)) return 1; } return 0; } /** * mii_link_ok - is link status up/ok * @mii: the MII interface * * Returns 1 if the MII reports link status up/ok, 0 otherwise. */ int mii_link_ok (struct mii_if_info *mii) { /* first, a dummy read, needed to latch some MII phys */ mii->mdio_read(mii->dev, mii->phy_id, MII_BMSR); if (mii->mdio_read(mii->dev, mii->phy_id, MII_BMSR) & BMSR_LSTATUS) return 1; return 0; } /** * mii_nway_restart - restart NWay (autonegotiation) for this interface * @mii: the MII interface * * Returns 0 on success, negative on error. 
*/ int mii_nway_restart (struct mii_if_info *mii) { int bmcr; int r = -EINVAL; /* if autoneg is off, it's an error */ bmcr = mii->mdio_read(mii->dev, mii->phy_id, MII_BMCR); if (bmcr & BMCR_ANENABLE) { bmcr |= BMCR_ANRESTART; mii->mdio_write(mii->dev, mii->phy_id, MII_BMCR, bmcr); r = 0; } return r; } /** * mii_check_link - check MII link status * @mii: MII interface * * If the link status changed (previous != current), call * netif_carrier_on() if current link status is Up or call * netif_carrier_off() if current link status is Down. */ void mii_check_link (struct mii_if_info *mii) { int cur_link = mii_link_ok(mii); int prev_link = netif_carrier_ok(mii->dev); if (cur_link && !prev_link) netif_carrier_on(mii->dev); else if (prev_link && !cur_link) netif_carrier_off(mii->dev); } /** * mii_check_media - check the MII interface for a carrier/speed/duplex change * @mii: the MII interface * @ok_to_print: OK to print link up/down messages * @init_media: OK to save duplex mode in @mii * * Returns 1 if the duplex mode changed, 0 if not. * If the media type is forced, always returns 0. */ unsigned int mii_check_media (struct mii_if_info *mii, unsigned int ok_to_print, unsigned int init_media) { unsigned int old_carrier, new_carrier; int advertise, lpa, media, duplex; int lpa2 = 0; /* check current and old link status */ old_carrier = netif_carrier_ok(mii->dev) ? 1 : 0; new_carrier = (unsigned int) mii_link_ok(mii); /* if carrier state did not change, this is a "bounce", * just exit as everything is already set correctly */ if ((!init_media) && (old_carrier == new_carrier)) return 0; /* duplex did not change */ /* no carrier, nothing much to do */ if (!new_carrier) { netif_carrier_off(mii->dev); if (ok_to_print) netdev_info(mii->dev, "link down\n"); return 0; /* duplex did not change */ } /* * we have carrier, see who's on the other end */ netif_carrier_on(mii->dev); if (mii->force_media) { if (ok_to_print) netdev_info(mii->dev, "link up\n"); return 0; /* duplex did not change */ } /* get MII advertise and LPA values */ if ((!init_media) && (mii->advertising)) advertise = mii->advertising; else { advertise = mii->mdio_read(mii->dev, mii->phy_id, MII_ADVERTISE); mii->advertising = advertise; } lpa = mii->mdio_read(mii->dev, mii->phy_id, MII_LPA); if (mii->supports_gmii) lpa2 = mii->mdio_read(mii->dev, mii->phy_id, MII_STAT1000); /* figure out media and duplex from advertise and LPA values */ media = mii_nway_result(lpa & advertise); duplex = (media & ADVERTISE_FULL) ? 1 : 0; if (lpa2 & LPA_1000FULL) duplex = 1; if (ok_to_print) netdev_info(mii->dev, "link up, %uMbps, %s-duplex, lpa 0x%04X\n", lpa2 & (LPA_1000FULL | LPA_1000HALF) ? 1000 : media & (ADVERTISE_100FULL | ADVERTISE_100HALF) ? 100 : 10, duplex ? "full" : "half", lpa); if ((init_media) || (mii->full_duplex != duplex)) { mii->full_duplex = duplex; return 1; /* duplex changed */ } return 0; /* duplex did not change */ } /** * generic_mii_ioctl - main MII ioctl interface * @mii_if: the MII interface * @mii_data: MII ioctl data structure * @cmd: MII ioctl command * @duplex_chg_out: pointer to @duplex_changed status if there was no * ioctl error * * Returns 0 on success, negative on error. 
*/ int generic_mii_ioctl(struct mii_if_info *mii_if, struct mii_ioctl_data *mii_data, int cmd, unsigned int *duplex_chg_out) { int rc = 0; unsigned int duplex_changed = 0; if (duplex_chg_out) *duplex_chg_out = 0; mii_data->phy_id &= mii_if->phy_id_mask; mii_data->reg_num &= mii_if->reg_num_mask; switch(cmd) { case SIOCGMIIPHY: mii_data->phy_id = mii_if->phy_id; fallthrough; case SIOCGMIIREG: mii_data->val_out = mii_if->mdio_read(mii_if->dev, mii_data->phy_id, mii_data->reg_num); break; case SIOCSMIIREG: { u16 val = mii_data->val_in; if (mii_data->phy_id == mii_if->phy_id) { switch(mii_data->reg_num) { case MII_BMCR: { unsigned int new_duplex = 0; if (val & (BMCR_RESET|BMCR_ANENABLE)) mii_if->force_media = 0; else mii_if->force_media = 1; if (mii_if->force_media && (val & BMCR_FULLDPLX)) new_duplex = 1; if (mii_if->full_duplex != new_duplex) { duplex_changed = 1; mii_if->full_duplex = new_duplex; } break; } case MII_ADVERTISE: mii_if->advertising = val; break; default: /* do nothing */ break; } } mii_if->mdio_write(mii_if->dev, mii_data->phy_id, mii_data->reg_num, val); break; } default: rc = -EOPNOTSUPP; break; } if ((rc == 0) && (duplex_chg_out) && (duplex_changed)) *duplex_chg_out = 1; return rc; } MODULE_AUTHOR ("Jeff Garzik <[email protected]>"); MODULE_DESCRIPTION ("MII hardware support library"); MODULE_LICENSE("GPL"); EXPORT_SYMBOL(mii_link_ok); EXPORT_SYMBOL(mii_nway_restart); EXPORT_SYMBOL(mii_ethtool_gset); EXPORT_SYMBOL(mii_ethtool_get_link_ksettings); EXPORT_SYMBOL(mii_ethtool_sset); EXPORT_SYMBOL(mii_ethtool_set_link_ksettings); EXPORT_SYMBOL(mii_check_link); EXPORT_SYMBOL(mii_check_media); EXPORT_SYMBOL(mii_check_gmii_support); EXPORT_SYMBOL(generic_mii_ioctl);
linux-master
drivers/net/mii.c
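mii.c is a helper library rather than a driver: a MAC driver fills in a struct mii_if_info with its PHY address and mdio_read/mdio_write callbacks and then delegates link supervision, autonegotiation restart and the MII ioctl plumbing to the exported functions above. The fragment below is a minimal sketch of that wiring under the assumption of a hypothetical driver private struct; struct foo_priv and the foo_* functions are placeholders, while the mii_if_info fields and library calls are the ones used in the file above.

/* Minimal sketch of hooking a MAC driver into the mii library; foo_* names
 * are hypothetical, the mii_if_info API is the real one used above. */
#include <linux/if.h>
#include <linux/netdevice.h>
#include <linux/mii.h>

struct foo_priv {
	struct net_device *dev;
	struct mii_if_info mii;
};

/* The library never touches hardware itself; it calls back into these. */
static int foo_mdio_read(struct net_device *dev, int phy_id, int location)
{
	/* read MII register 'location' of PHY 'phy_id' via the MAC here */
	return 0;
}

static void foo_mdio_write(struct net_device *dev, int phy_id, int location,
			   int val)
{
	/* write MII register 'location' of PHY 'phy_id' via the MAC here */
}

static void foo_mii_init(struct foo_priv *priv, int phy_addr)
{
	priv->mii.dev = priv->dev;
	priv->mii.phy_id = phy_addr;
	priv->mii.phy_id_mask = 0x1f;	/* clause-22: 5-bit PHY address */
	priv->mii.reg_num_mask = 0x1f;	/* clause-22: 5-bit register number */
	priv->mii.mdio_read = foo_mdio_read;
	priv->mii.mdio_write = foo_mdio_write;
	priv->mii.supports_gmii = mii_check_gmii_support(&priv->mii);

	/* kick off autonegotiation and record the initial duplex setting */
	mii_nway_restart(&priv->mii);
	mii_check_media(&priv->mii, 1, 1);
}

/* periodic link supervision, e.g. from a timer or the interrupt handler */
static void foo_poll_link(struct foo_priv *priv)
{
	if (mii_check_media(&priv->mii, 1, 0)) {
		/* duplex changed: reprogram the MAC's duplex mode here */
	}
}

/* typically wired to .ndo_eth_ioctl so mii-tool/SIOCGMIIREG keep working */
static int foo_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	struct foo_priv *priv = netdev_priv(dev);

	return generic_mii_ioctl(&priv->mii, if_mii(rq), cmd, NULL);
}

A driver's ethtool get/set link-settings operations are usually backed the same way, by forwarding to mii_ethtool_get_link_ksettings() and mii_ethtool_set_link_ksettings() on the same mii_if_info.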
// SPDX-License-Identifier: GPL-2.0-or-later /* * drivers/net/macsec.c - MACsec device * * Copyright (c) 2015 Sabrina Dubroca <[email protected]> */ #include <linux/types.h> #include <linux/skbuff.h> #include <linux/socket.h> #include <linux/module.h> #include <crypto/aead.h> #include <linux/etherdevice.h> #include <linux/netdevice.h> #include <linux/rtnetlink.h> #include <linux/refcount.h> #include <net/genetlink.h> #include <net/sock.h> #include <net/gro_cells.h> #include <net/macsec.h> #include <net/dst_metadata.h> #include <linux/phy.h> #include <linux/byteorder/generic.h> #include <linux/if_arp.h> #include <uapi/linux/if_macsec.h> /* SecTAG length = macsec_eth_header without the optional SCI */ #define MACSEC_TAG_LEN 6 struct macsec_eth_header { struct ethhdr eth; /* SecTAG */ u8 tci_an; #if defined(__LITTLE_ENDIAN_BITFIELD) u8 short_length:6, unused:2; #elif defined(__BIG_ENDIAN_BITFIELD) u8 unused:2, short_length:6; #else #error "Please fix <asm/byteorder.h>" #endif __be32 packet_number; u8 secure_channel_id[8]; /* optional */ } __packed; /* minimum secure data length deemed "not short", see IEEE 802.1AE-2006 9.7 */ #define MIN_NON_SHORT_LEN 48 #define GCM_AES_IV_LEN 12 #define for_each_rxsc(secy, sc) \ for (sc = rcu_dereference_bh(secy->rx_sc); \ sc; \ sc = rcu_dereference_bh(sc->next)) #define for_each_rxsc_rtnl(secy, sc) \ for (sc = rtnl_dereference(secy->rx_sc); \ sc; \ sc = rtnl_dereference(sc->next)) #define pn_same_half(pn1, pn2) (!(((pn1) >> 31) ^ ((pn2) >> 31))) struct gcm_iv_xpn { union { u8 short_secure_channel_id[4]; ssci_t ssci; }; __be64 pn; } __packed; struct gcm_iv { union { u8 secure_channel_id[8]; sci_t sci; }; __be32 pn; }; #define MACSEC_VALIDATE_DEFAULT MACSEC_VALIDATE_STRICT struct pcpu_secy_stats { struct macsec_dev_stats stats; struct u64_stats_sync syncp; }; /** * struct macsec_dev - private data * @secy: SecY config * @real_dev: pointer to underlying netdevice * @dev_tracker: refcount tracker for @real_dev reference * @stats: MACsec device stats * @secys: linked list of SecY's on the underlying device * @gro_cells: pointer to the Generic Receive Offload cell * @offload: status of offloading on the MACsec device */ struct macsec_dev { struct macsec_secy secy; struct net_device *real_dev; netdevice_tracker dev_tracker; struct pcpu_secy_stats __percpu *stats; struct list_head secys; struct gro_cells gro_cells; enum macsec_offload offload; }; /** * struct macsec_rxh_data - rx_handler private argument * @secys: linked list of SecY's on this underlying device */ struct macsec_rxh_data { struct list_head secys; }; static struct macsec_dev *macsec_priv(const struct net_device *dev) { return (struct macsec_dev *)netdev_priv(dev); } static struct macsec_rxh_data *macsec_data_rcu(const struct net_device *dev) { return rcu_dereference_bh(dev->rx_handler_data); } static struct macsec_rxh_data *macsec_data_rtnl(const struct net_device *dev) { return rtnl_dereference(dev->rx_handler_data); } struct macsec_cb { struct aead_request *req; union { struct macsec_tx_sa *tx_sa; struct macsec_rx_sa *rx_sa; }; u8 assoc_num; bool valid; bool has_sci; }; static struct macsec_rx_sa *macsec_rxsa_get(struct macsec_rx_sa __rcu *ptr) { struct macsec_rx_sa *sa = rcu_dereference_bh(ptr); if (!sa || !sa->active) return NULL; if (!refcount_inc_not_zero(&sa->refcnt)) return NULL; return sa; } static struct macsec_rx_sa *macsec_active_rxsa_get(struct macsec_rx_sc *rx_sc) { struct macsec_rx_sa *sa = NULL; int an; for (an = 0; an < MACSEC_NUM_AN; an++) { sa = macsec_rxsa_get(rx_sc->sa[an]); if 
(sa) break; } return sa; } static void free_rx_sc_rcu(struct rcu_head *head) { struct macsec_rx_sc *rx_sc = container_of(head, struct macsec_rx_sc, rcu_head); free_percpu(rx_sc->stats); kfree(rx_sc); } static struct macsec_rx_sc *macsec_rxsc_get(struct macsec_rx_sc *sc) { return refcount_inc_not_zero(&sc->refcnt) ? sc : NULL; } static void macsec_rxsc_put(struct macsec_rx_sc *sc) { if (refcount_dec_and_test(&sc->refcnt)) call_rcu(&sc->rcu_head, free_rx_sc_rcu); } static void free_rxsa(struct rcu_head *head) { struct macsec_rx_sa *sa = container_of(head, struct macsec_rx_sa, rcu); crypto_free_aead(sa->key.tfm); free_percpu(sa->stats); kfree(sa); } static void macsec_rxsa_put(struct macsec_rx_sa *sa) { if (refcount_dec_and_test(&sa->refcnt)) call_rcu(&sa->rcu, free_rxsa); } static struct macsec_tx_sa *macsec_txsa_get(struct macsec_tx_sa __rcu *ptr) { struct macsec_tx_sa *sa = rcu_dereference_bh(ptr); if (!sa || !sa->active) return NULL; if (!refcount_inc_not_zero(&sa->refcnt)) return NULL; return sa; } static void free_txsa(struct rcu_head *head) { struct macsec_tx_sa *sa = container_of(head, struct macsec_tx_sa, rcu); crypto_free_aead(sa->key.tfm); free_percpu(sa->stats); kfree(sa); } static void macsec_txsa_put(struct macsec_tx_sa *sa) { if (refcount_dec_and_test(&sa->refcnt)) call_rcu(&sa->rcu, free_txsa); } static struct macsec_cb *macsec_skb_cb(struct sk_buff *skb) { BUILD_BUG_ON(sizeof(struct macsec_cb) > sizeof(skb->cb)); return (struct macsec_cb *)skb->cb; } #define MACSEC_PORT_SCB (0x0000) #define MACSEC_UNDEF_SCI ((__force sci_t)0xffffffffffffffffULL) #define MACSEC_UNDEF_SSCI ((__force ssci_t)0xffffffff) #define MACSEC_GCM_AES_128_SAK_LEN 16 #define MACSEC_GCM_AES_256_SAK_LEN 32 #define DEFAULT_SAK_LEN MACSEC_GCM_AES_128_SAK_LEN #define DEFAULT_XPN false #define DEFAULT_SEND_SCI true #define DEFAULT_ENCRYPT false #define DEFAULT_ENCODING_SA 0 #define MACSEC_XPN_MAX_REPLAY_WINDOW (((1 << 30) - 1)) static sci_t make_sci(const u8 *addr, __be16 port) { sci_t sci; memcpy(&sci, addr, ETH_ALEN); memcpy(((char *)&sci) + ETH_ALEN, &port, sizeof(port)); return sci; } static sci_t macsec_frame_sci(struct macsec_eth_header *hdr, bool sci_present) { sci_t sci; if (sci_present) memcpy(&sci, hdr->secure_channel_id, sizeof(hdr->secure_channel_id)); else sci = make_sci(hdr->eth.h_source, MACSEC_PORT_ES); return sci; } static unsigned int macsec_sectag_len(bool sci_present) { return MACSEC_TAG_LEN + (sci_present ? 
MACSEC_SCI_LEN : 0); } static unsigned int macsec_hdr_len(bool sci_present) { return macsec_sectag_len(sci_present) + ETH_HLEN; } static unsigned int macsec_extra_len(bool sci_present) { return macsec_sectag_len(sci_present) + sizeof(__be16); } /* Fill SecTAG according to IEEE 802.1AE-2006 10.5.3 */ static void macsec_fill_sectag(struct macsec_eth_header *h, const struct macsec_secy *secy, u32 pn, bool sci_present) { const struct macsec_tx_sc *tx_sc = &secy->tx_sc; memset(&h->tci_an, 0, macsec_sectag_len(sci_present)); h->eth.h_proto = htons(ETH_P_MACSEC); if (sci_present) { h->tci_an |= MACSEC_TCI_SC; memcpy(&h->secure_channel_id, &secy->sci, sizeof(h->secure_channel_id)); } else { if (tx_sc->end_station) h->tci_an |= MACSEC_TCI_ES; if (tx_sc->scb) h->tci_an |= MACSEC_TCI_SCB; } h->packet_number = htonl(pn); /* with GCM, C/E clear for !encrypt, both set for encrypt */ if (tx_sc->encrypt) h->tci_an |= MACSEC_TCI_CONFID; else if (secy->icv_len != MACSEC_DEFAULT_ICV_LEN) h->tci_an |= MACSEC_TCI_C; h->tci_an |= tx_sc->encoding_sa; } static void macsec_set_shortlen(struct macsec_eth_header *h, size_t data_len) { if (data_len < MIN_NON_SHORT_LEN) h->short_length = data_len; } /* Checks if a MACsec interface is being offloaded to an hardware engine */ static bool macsec_is_offloaded(struct macsec_dev *macsec) { if (macsec->offload == MACSEC_OFFLOAD_MAC || macsec->offload == MACSEC_OFFLOAD_PHY) return true; return false; } /* Checks if underlying layers implement MACsec offloading functions. */ static bool macsec_check_offload(enum macsec_offload offload, struct macsec_dev *macsec) { if (!macsec || !macsec->real_dev) return false; if (offload == MACSEC_OFFLOAD_PHY) return macsec->real_dev->phydev && macsec->real_dev->phydev->macsec_ops; else if (offload == MACSEC_OFFLOAD_MAC) return macsec->real_dev->features & NETIF_F_HW_MACSEC && macsec->real_dev->macsec_ops; return false; } static const struct macsec_ops *__macsec_get_ops(enum macsec_offload offload, struct macsec_dev *macsec, struct macsec_context *ctx) { if (ctx) { memset(ctx, 0, sizeof(*ctx)); ctx->offload = offload; if (offload == MACSEC_OFFLOAD_PHY) ctx->phydev = macsec->real_dev->phydev; else if (offload == MACSEC_OFFLOAD_MAC) ctx->netdev = macsec->real_dev; } if (offload == MACSEC_OFFLOAD_PHY) return macsec->real_dev->phydev->macsec_ops; else return macsec->real_dev->macsec_ops; } /* Returns a pointer to the MACsec ops struct if any and updates the MACsec * context device reference if provided. 
*/ static const struct macsec_ops *macsec_get_ops(struct macsec_dev *macsec, struct macsec_context *ctx) { if (!macsec_check_offload(macsec->offload, macsec)) return NULL; return __macsec_get_ops(macsec->offload, macsec, ctx); } /* validate MACsec packet according to IEEE 802.1AE-2018 9.12 */ static bool macsec_validate_skb(struct sk_buff *skb, u16 icv_len, bool xpn) { struct macsec_eth_header *h = (struct macsec_eth_header *)skb->data; int len = skb->len - 2 * ETH_ALEN; int extra_len = macsec_extra_len(!!(h->tci_an & MACSEC_TCI_SC)) + icv_len; /* a) It comprises at least 17 octets */ if (skb->len <= 16) return false; /* b) MACsec EtherType: already checked */ /* c) V bit is clear */ if (h->tci_an & MACSEC_TCI_VERSION) return false; /* d) ES or SCB => !SC */ if ((h->tci_an & MACSEC_TCI_ES || h->tci_an & MACSEC_TCI_SCB) && (h->tci_an & MACSEC_TCI_SC)) return false; /* e) Bits 7 and 8 of octet 4 of the SecTAG are clear */ if (h->unused) return false; /* rx.pn != 0 if not XPN (figure 10-5 with 802.11AEbw-2013 amendment) */ if (!h->packet_number && !xpn) return false; /* length check, f) g) h) i) */ if (h->short_length) return len == extra_len + h->short_length; return len >= extra_len + MIN_NON_SHORT_LEN; } #define MACSEC_NEEDED_HEADROOM (macsec_extra_len(true)) #define MACSEC_NEEDED_TAILROOM MACSEC_STD_ICV_LEN static void macsec_fill_iv_xpn(unsigned char *iv, ssci_t ssci, u64 pn, salt_t salt) { struct gcm_iv_xpn *gcm_iv = (struct gcm_iv_xpn *)iv; gcm_iv->ssci = ssci ^ salt.ssci; gcm_iv->pn = cpu_to_be64(pn) ^ salt.pn; } static void macsec_fill_iv(unsigned char *iv, sci_t sci, u32 pn) { struct gcm_iv *gcm_iv = (struct gcm_iv *)iv; gcm_iv->sci = sci; gcm_iv->pn = htonl(pn); } static struct macsec_eth_header *macsec_ethhdr(struct sk_buff *skb) { return (struct macsec_eth_header *)skb_mac_header(skb); } static void __macsec_pn_wrapped(struct macsec_secy *secy, struct macsec_tx_sa *tx_sa) { pr_debug("PN wrapped, transitioning to !oper\n"); tx_sa->active = false; if (secy->protect_frames) secy->operational = false; } void macsec_pn_wrapped(struct macsec_secy *secy, struct macsec_tx_sa *tx_sa) { spin_lock_bh(&tx_sa->lock); __macsec_pn_wrapped(secy, tx_sa); spin_unlock_bh(&tx_sa->lock); } EXPORT_SYMBOL_GPL(macsec_pn_wrapped); static pn_t tx_sa_update_pn(struct macsec_tx_sa *tx_sa, struct macsec_secy *secy) { pn_t pn; spin_lock_bh(&tx_sa->lock); pn = tx_sa->next_pn_halves; if (secy->xpn) tx_sa->next_pn++; else tx_sa->next_pn_halves.lower++; if (tx_sa->next_pn == 0) __macsec_pn_wrapped(secy, tx_sa); spin_unlock_bh(&tx_sa->lock); return pn; } static void macsec_encrypt_finish(struct sk_buff *skb, struct net_device *dev) { struct macsec_dev *macsec = netdev_priv(dev); skb->dev = macsec->real_dev; skb_reset_mac_header(skb); skb->protocol = eth_hdr(skb)->h_proto; } static unsigned int macsec_msdu_len(struct sk_buff *skb) { struct macsec_dev *macsec = macsec_priv(skb->dev); struct macsec_secy *secy = &macsec->secy; bool sci_present = macsec_skb_cb(skb)->has_sci; return skb->len - macsec_hdr_len(sci_present) - secy->icv_len; } static void macsec_count_tx(struct sk_buff *skb, struct macsec_tx_sc *tx_sc, struct macsec_tx_sa *tx_sa) { unsigned int msdu_len = macsec_msdu_len(skb); struct pcpu_tx_sc_stats *txsc_stats = this_cpu_ptr(tx_sc->stats); u64_stats_update_begin(&txsc_stats->syncp); if (tx_sc->encrypt) { txsc_stats->stats.OutOctetsEncrypted += msdu_len; txsc_stats->stats.OutPktsEncrypted++; this_cpu_inc(tx_sa->stats->OutPktsEncrypted); } else { txsc_stats->stats.OutOctetsProtected += msdu_len; 
txsc_stats->stats.OutPktsProtected++; this_cpu_inc(tx_sa->stats->OutPktsProtected); } u64_stats_update_end(&txsc_stats->syncp); } static void count_tx(struct net_device *dev, int ret, int len) { if (likely(ret == NET_XMIT_SUCCESS || ret == NET_XMIT_CN)) dev_sw_netstats_tx_add(dev, 1, len); } static void macsec_encrypt_done(void *data, int err) { struct sk_buff *skb = data; struct net_device *dev = skb->dev; struct macsec_dev *macsec = macsec_priv(dev); struct macsec_tx_sa *sa = macsec_skb_cb(skb)->tx_sa; int len, ret; aead_request_free(macsec_skb_cb(skb)->req); rcu_read_lock_bh(); macsec_count_tx(skb, &macsec->secy.tx_sc, macsec_skb_cb(skb)->tx_sa); /* packet is encrypted/protected so tx_bytes must be calculated */ len = macsec_msdu_len(skb) + 2 * ETH_ALEN; macsec_encrypt_finish(skb, dev); ret = dev_queue_xmit(skb); count_tx(dev, ret, len); rcu_read_unlock_bh(); macsec_txsa_put(sa); dev_put(dev); } static struct aead_request *macsec_alloc_req(struct crypto_aead *tfm, unsigned char **iv, struct scatterlist **sg, int num_frags) { size_t size, iv_offset, sg_offset; struct aead_request *req; void *tmp; size = sizeof(struct aead_request) + crypto_aead_reqsize(tfm); iv_offset = size; size += GCM_AES_IV_LEN; size = ALIGN(size, __alignof__(struct scatterlist)); sg_offset = size; size += sizeof(struct scatterlist) * num_frags; tmp = kmalloc(size, GFP_ATOMIC); if (!tmp) return NULL; *iv = (unsigned char *)(tmp + iv_offset); *sg = (struct scatterlist *)(tmp + sg_offset); req = tmp; aead_request_set_tfm(req, tfm); return req; } static struct sk_buff *macsec_encrypt(struct sk_buff *skb, struct net_device *dev) { int ret; struct scatterlist *sg; struct sk_buff *trailer; unsigned char *iv; struct ethhdr *eth; struct macsec_eth_header *hh; size_t unprotected_len; struct aead_request *req; struct macsec_secy *secy; struct macsec_tx_sc *tx_sc; struct macsec_tx_sa *tx_sa; struct macsec_dev *macsec = macsec_priv(dev); bool sci_present; pn_t pn; secy = &macsec->secy; tx_sc = &secy->tx_sc; /* 10.5.1 TX SA assignment */ tx_sa = macsec_txsa_get(tx_sc->sa[tx_sc->encoding_sa]); if (!tx_sa) { secy->operational = false; kfree_skb(skb); return ERR_PTR(-EINVAL); } if (unlikely(skb_headroom(skb) < MACSEC_NEEDED_HEADROOM || skb_tailroom(skb) < MACSEC_NEEDED_TAILROOM)) { struct sk_buff *nskb = skb_copy_expand(skb, MACSEC_NEEDED_HEADROOM, MACSEC_NEEDED_TAILROOM, GFP_ATOMIC); if (likely(nskb)) { consume_skb(skb); skb = nskb; } else { macsec_txsa_put(tx_sa); kfree_skb(skb); return ERR_PTR(-ENOMEM); } } else { skb = skb_unshare(skb, GFP_ATOMIC); if (!skb) { macsec_txsa_put(tx_sa); return ERR_PTR(-ENOMEM); } } unprotected_len = skb->len; eth = eth_hdr(skb); sci_present = macsec_send_sci(secy); hh = skb_push(skb, macsec_extra_len(sci_present)); memmove(hh, eth, 2 * ETH_ALEN); pn = tx_sa_update_pn(tx_sa, secy); if (pn.full64 == 0) { macsec_txsa_put(tx_sa); kfree_skb(skb); return ERR_PTR(-ENOLINK); } macsec_fill_sectag(hh, secy, pn.lower, sci_present); macsec_set_shortlen(hh, unprotected_len - 2 * ETH_ALEN); skb_put(skb, secy->icv_len); if (skb->len - ETH_HLEN > macsec_priv(dev)->real_dev->mtu) { struct pcpu_secy_stats *secy_stats = this_cpu_ptr(macsec->stats); u64_stats_update_begin(&secy_stats->syncp); secy_stats->stats.OutPktsTooLong++; u64_stats_update_end(&secy_stats->syncp); macsec_txsa_put(tx_sa); kfree_skb(skb); return ERR_PTR(-EINVAL); } ret = skb_cow_data(skb, 0, &trailer); if (unlikely(ret < 0)) { macsec_txsa_put(tx_sa); kfree_skb(skb); return ERR_PTR(ret); } req = macsec_alloc_req(tx_sa->key.tfm, &iv, &sg, ret); if 
(!req) { macsec_txsa_put(tx_sa); kfree_skb(skb); return ERR_PTR(-ENOMEM); } if (secy->xpn) macsec_fill_iv_xpn(iv, tx_sa->ssci, pn.full64, tx_sa->key.salt); else macsec_fill_iv(iv, secy->sci, pn.lower); sg_init_table(sg, ret); ret = skb_to_sgvec(skb, sg, 0, skb->len); if (unlikely(ret < 0)) { aead_request_free(req); macsec_txsa_put(tx_sa); kfree_skb(skb); return ERR_PTR(ret); } if (tx_sc->encrypt) { int len = skb->len - macsec_hdr_len(sci_present) - secy->icv_len; aead_request_set_crypt(req, sg, sg, len, iv); aead_request_set_ad(req, macsec_hdr_len(sci_present)); } else { aead_request_set_crypt(req, sg, sg, 0, iv); aead_request_set_ad(req, skb->len - secy->icv_len); } macsec_skb_cb(skb)->req = req; macsec_skb_cb(skb)->tx_sa = tx_sa; macsec_skb_cb(skb)->has_sci = sci_present; aead_request_set_callback(req, 0, macsec_encrypt_done, skb); dev_hold(skb->dev); ret = crypto_aead_encrypt(req); if (ret == -EINPROGRESS) { return ERR_PTR(ret); } else if (ret != 0) { dev_put(skb->dev); kfree_skb(skb); aead_request_free(req); macsec_txsa_put(tx_sa); return ERR_PTR(-EINVAL); } dev_put(skb->dev); aead_request_free(req); macsec_txsa_put(tx_sa); return skb; } static bool macsec_post_decrypt(struct sk_buff *skb, struct macsec_secy *secy, u32 pn) { struct macsec_rx_sa *rx_sa = macsec_skb_cb(skb)->rx_sa; struct pcpu_rx_sc_stats *rxsc_stats = this_cpu_ptr(rx_sa->sc->stats); struct macsec_eth_header *hdr = macsec_ethhdr(skb); u32 lowest_pn = 0; spin_lock(&rx_sa->lock); if (rx_sa->next_pn_halves.lower >= secy->replay_window) lowest_pn = rx_sa->next_pn_halves.lower - secy->replay_window; /* Now perform replay protection check again * (see IEEE 802.1AE-2006 figure 10-5) */ if (secy->replay_protect && pn < lowest_pn && (!secy->xpn || pn_same_half(pn, lowest_pn))) { spin_unlock(&rx_sa->lock); u64_stats_update_begin(&rxsc_stats->syncp); rxsc_stats->stats.InPktsLate++; u64_stats_update_end(&rxsc_stats->syncp); DEV_STATS_INC(secy->netdev, rx_dropped); return false; } if (secy->validate_frames != MACSEC_VALIDATE_DISABLED) { unsigned int msdu_len = macsec_msdu_len(skb); u64_stats_update_begin(&rxsc_stats->syncp); if (hdr->tci_an & MACSEC_TCI_E) rxsc_stats->stats.InOctetsDecrypted += msdu_len; else rxsc_stats->stats.InOctetsValidated += msdu_len; u64_stats_update_end(&rxsc_stats->syncp); } if (!macsec_skb_cb(skb)->valid) { spin_unlock(&rx_sa->lock); /* 10.6.5 */ if (hdr->tci_an & MACSEC_TCI_C || secy->validate_frames == MACSEC_VALIDATE_STRICT) { u64_stats_update_begin(&rxsc_stats->syncp); rxsc_stats->stats.InPktsNotValid++; u64_stats_update_end(&rxsc_stats->syncp); this_cpu_inc(rx_sa->stats->InPktsNotValid); DEV_STATS_INC(secy->netdev, rx_errors); return false; } u64_stats_update_begin(&rxsc_stats->syncp); if (secy->validate_frames == MACSEC_VALIDATE_CHECK) { rxsc_stats->stats.InPktsInvalid++; this_cpu_inc(rx_sa->stats->InPktsInvalid); } else if (pn < lowest_pn) { rxsc_stats->stats.InPktsDelayed++; } else { rxsc_stats->stats.InPktsUnchecked++; } u64_stats_update_end(&rxsc_stats->syncp); } else { u64_stats_update_begin(&rxsc_stats->syncp); if (pn < lowest_pn) { rxsc_stats->stats.InPktsDelayed++; } else { rxsc_stats->stats.InPktsOK++; this_cpu_inc(rx_sa->stats->InPktsOK); } u64_stats_update_end(&rxsc_stats->syncp); // Instead of "pn >=" - to support pn overflow in xpn if (pn + 1 > rx_sa->next_pn_halves.lower) { rx_sa->next_pn_halves.lower = pn + 1; } else if (secy->xpn && !pn_same_half(pn, rx_sa->next_pn_halves.lower)) { rx_sa->next_pn_halves.upper++; rx_sa->next_pn_halves.lower = pn + 1; } spin_unlock(&rx_sa->lock); } 
return true; } static void macsec_reset_skb(struct sk_buff *skb, struct net_device *dev) { skb->pkt_type = PACKET_HOST; skb->protocol = eth_type_trans(skb, dev); skb_reset_network_header(skb); if (!skb_transport_header_was_set(skb)) skb_reset_transport_header(skb); skb_reset_mac_len(skb); } static void macsec_finalize_skb(struct sk_buff *skb, u8 icv_len, u8 hdr_len) { skb->ip_summed = CHECKSUM_NONE; memmove(skb->data + hdr_len, skb->data, 2 * ETH_ALEN); skb_pull(skb, hdr_len); pskb_trim_unique(skb, skb->len - icv_len); } static void count_rx(struct net_device *dev, int len) { dev_sw_netstats_rx_add(dev, len); } static void macsec_decrypt_done(void *data, int err) { struct sk_buff *skb = data; struct net_device *dev = skb->dev; struct macsec_dev *macsec = macsec_priv(dev); struct macsec_rx_sa *rx_sa = macsec_skb_cb(skb)->rx_sa; struct macsec_rx_sc *rx_sc = rx_sa->sc; int len; u32 pn; aead_request_free(macsec_skb_cb(skb)->req); if (!err) macsec_skb_cb(skb)->valid = true; rcu_read_lock_bh(); pn = ntohl(macsec_ethhdr(skb)->packet_number); if (!macsec_post_decrypt(skb, &macsec->secy, pn)) { rcu_read_unlock_bh(); kfree_skb(skb); goto out; } macsec_finalize_skb(skb, macsec->secy.icv_len, macsec_extra_len(macsec_skb_cb(skb)->has_sci)); len = skb->len; macsec_reset_skb(skb, macsec->secy.netdev); if (gro_cells_receive(&macsec->gro_cells, skb) == NET_RX_SUCCESS) count_rx(dev, len); rcu_read_unlock_bh(); out: macsec_rxsa_put(rx_sa); macsec_rxsc_put(rx_sc); dev_put(dev); } static struct sk_buff *macsec_decrypt(struct sk_buff *skb, struct net_device *dev, struct macsec_rx_sa *rx_sa, sci_t sci, struct macsec_secy *secy) { int ret; struct scatterlist *sg; struct sk_buff *trailer; unsigned char *iv; struct aead_request *req; struct macsec_eth_header *hdr; u32 hdr_pn; u16 icv_len = secy->icv_len; macsec_skb_cb(skb)->valid = false; skb = skb_share_check(skb, GFP_ATOMIC); if (!skb) return ERR_PTR(-ENOMEM); ret = skb_cow_data(skb, 0, &trailer); if (unlikely(ret < 0)) { kfree_skb(skb); return ERR_PTR(ret); } req = macsec_alloc_req(rx_sa->key.tfm, &iv, &sg, ret); if (!req) { kfree_skb(skb); return ERR_PTR(-ENOMEM); } hdr = (struct macsec_eth_header *)skb->data; hdr_pn = ntohl(hdr->packet_number); if (secy->xpn) { pn_t recovered_pn = rx_sa->next_pn_halves; recovered_pn.lower = hdr_pn; if (hdr_pn < rx_sa->next_pn_halves.lower && !pn_same_half(hdr_pn, rx_sa->next_pn_halves.lower)) recovered_pn.upper++; macsec_fill_iv_xpn(iv, rx_sa->ssci, recovered_pn.full64, rx_sa->key.salt); } else { macsec_fill_iv(iv, sci, hdr_pn); } sg_init_table(sg, ret); ret = skb_to_sgvec(skb, sg, 0, skb->len); if (unlikely(ret < 0)) { aead_request_free(req); kfree_skb(skb); return ERR_PTR(ret); } if (hdr->tci_an & MACSEC_TCI_E) { /* confidentiality: ethernet + macsec header * authenticated, encrypted payload */ int len = skb->len - macsec_hdr_len(macsec_skb_cb(skb)->has_sci); aead_request_set_crypt(req, sg, sg, len, iv); aead_request_set_ad(req, macsec_hdr_len(macsec_skb_cb(skb)->has_sci)); skb = skb_unshare(skb, GFP_ATOMIC); if (!skb) { aead_request_free(req); return ERR_PTR(-ENOMEM); } } else { /* integrity only: all headers + data authenticated */ aead_request_set_crypt(req, sg, sg, icv_len, iv); aead_request_set_ad(req, skb->len - icv_len); } macsec_skb_cb(skb)->req = req; skb->dev = dev; aead_request_set_callback(req, 0, macsec_decrypt_done, skb); dev_hold(dev); ret = crypto_aead_decrypt(req); if (ret == -EINPROGRESS) { return ERR_PTR(ret); } else if (ret != 0) { /* decryption/authentication failed * 10.6 if validateFrames is disabled, 
deliver anyway */ if (ret != -EBADMSG) { kfree_skb(skb); skb = ERR_PTR(ret); } } else { macsec_skb_cb(skb)->valid = true; } dev_put(dev); aead_request_free(req); return skb; } static struct macsec_rx_sc *find_rx_sc(struct macsec_secy *secy, sci_t sci) { struct macsec_rx_sc *rx_sc; for_each_rxsc(secy, rx_sc) { if (rx_sc->sci == sci) return rx_sc; } return NULL; } static struct macsec_rx_sc *find_rx_sc_rtnl(struct macsec_secy *secy, sci_t sci) { struct macsec_rx_sc *rx_sc; for_each_rxsc_rtnl(secy, rx_sc) { if (rx_sc->sci == sci) return rx_sc; } return NULL; } static enum rx_handler_result handle_not_macsec(struct sk_buff *skb) { /* Deliver to the uncontrolled port by default */ enum rx_handler_result ret = RX_HANDLER_PASS; struct ethhdr *hdr = eth_hdr(skb); struct metadata_dst *md_dst; struct macsec_rxh_data *rxd; struct macsec_dev *macsec; rcu_read_lock(); rxd = macsec_data_rcu(skb->dev); md_dst = skb_metadata_dst(skb); list_for_each_entry_rcu(macsec, &rxd->secys, secys) { struct sk_buff *nskb; struct pcpu_secy_stats *secy_stats = this_cpu_ptr(macsec->stats); struct net_device *ndev = macsec->secy.netdev; /* If h/w offloading is enabled, HW decodes frames and strips * the SecTAG, so we have to deduce which port to deliver to. */ if (macsec_is_offloaded(macsec) && netif_running(ndev)) { struct macsec_rx_sc *rx_sc = NULL; if (md_dst && md_dst->type == METADATA_MACSEC) rx_sc = find_rx_sc(&macsec->secy, md_dst->u.macsec_info.sci); if (md_dst && md_dst->type == METADATA_MACSEC && !rx_sc) continue; if (ether_addr_equal_64bits(hdr->h_dest, ndev->dev_addr)) { /* exact match, divert skb to this port */ skb->dev = ndev; skb->pkt_type = PACKET_HOST; ret = RX_HANDLER_ANOTHER; goto out; } else if (is_multicast_ether_addr_64bits( hdr->h_dest)) { /* multicast frame, deliver on this port too */ nskb = skb_clone(skb, GFP_ATOMIC); if (!nskb) break; nskb->dev = ndev; if (ether_addr_equal_64bits(hdr->h_dest, ndev->broadcast)) nskb->pkt_type = PACKET_BROADCAST; else nskb->pkt_type = PACKET_MULTICAST; __netif_rx(nskb); } else if (rx_sc || ndev->flags & IFF_PROMISC) { skb->dev = ndev; skb->pkt_type = PACKET_HOST; ret = RX_HANDLER_ANOTHER; goto out; } continue; } /* 10.6 If the management control validateFrames is not * Strict, frames without a SecTAG are received, counted, and * delivered to the Controlled Port */ if (macsec->secy.validate_frames == MACSEC_VALIDATE_STRICT) { u64_stats_update_begin(&secy_stats->syncp); secy_stats->stats.InPktsNoTag++; u64_stats_update_end(&secy_stats->syncp); DEV_STATS_INC(macsec->secy.netdev, rx_dropped); continue; } /* deliver on this port */ nskb = skb_clone(skb, GFP_ATOMIC); if (!nskb) break; nskb->dev = ndev; if (__netif_rx(nskb) == NET_RX_SUCCESS) { u64_stats_update_begin(&secy_stats->syncp); secy_stats->stats.InPktsUntagged++; u64_stats_update_end(&secy_stats->syncp); } } out: rcu_read_unlock(); return ret; } static rx_handler_result_t macsec_handle_frame(struct sk_buff **pskb) { struct sk_buff *skb = *pskb; struct net_device *dev = skb->dev; struct macsec_eth_header *hdr; struct macsec_secy *secy = NULL; struct macsec_rx_sc *rx_sc; struct macsec_rx_sa *rx_sa; struct macsec_rxh_data *rxd; struct macsec_dev *macsec; unsigned int len; sci_t sci; u32 hdr_pn; bool cbit; struct pcpu_rx_sc_stats *rxsc_stats; struct pcpu_secy_stats *secy_stats; bool pulled_sci; int ret; if (skb_headroom(skb) < ETH_HLEN) goto drop_direct; hdr = macsec_ethhdr(skb); if (hdr->eth.h_proto != htons(ETH_P_MACSEC)) return handle_not_macsec(skb); skb = skb_unshare(skb, GFP_ATOMIC); *pskb = skb; if (!skb) 
return RX_HANDLER_CONSUMED; pulled_sci = pskb_may_pull(skb, macsec_extra_len(true)); if (!pulled_sci) { if (!pskb_may_pull(skb, macsec_extra_len(false))) goto drop_direct; } hdr = macsec_ethhdr(skb); /* Frames with a SecTAG that has the TCI E bit set but the C * bit clear are discarded, as this reserved encoding is used * to identify frames with a SecTAG that are not to be * delivered to the Controlled Port. */ if ((hdr->tci_an & (MACSEC_TCI_C | MACSEC_TCI_E)) == MACSEC_TCI_E) return RX_HANDLER_PASS; /* now, pull the extra length */ if (hdr->tci_an & MACSEC_TCI_SC) { if (!pulled_sci) goto drop_direct; } /* ethernet header is part of crypto processing */ skb_push(skb, ETH_HLEN); macsec_skb_cb(skb)->has_sci = !!(hdr->tci_an & MACSEC_TCI_SC); macsec_skb_cb(skb)->assoc_num = hdr->tci_an & MACSEC_AN_MASK; sci = macsec_frame_sci(hdr, macsec_skb_cb(skb)->has_sci); rcu_read_lock(); rxd = macsec_data_rcu(skb->dev); list_for_each_entry_rcu(macsec, &rxd->secys, secys) { struct macsec_rx_sc *sc = find_rx_sc(&macsec->secy, sci); sc = sc ? macsec_rxsc_get(sc) : NULL; if (sc) { secy = &macsec->secy; rx_sc = sc; break; } } if (!secy) goto nosci; dev = secy->netdev; macsec = macsec_priv(dev); secy_stats = this_cpu_ptr(macsec->stats); rxsc_stats = this_cpu_ptr(rx_sc->stats); if (!macsec_validate_skb(skb, secy->icv_len, secy->xpn)) { u64_stats_update_begin(&secy_stats->syncp); secy_stats->stats.InPktsBadTag++; u64_stats_update_end(&secy_stats->syncp); DEV_STATS_INC(secy->netdev, rx_errors); goto drop_nosa; } rx_sa = macsec_rxsa_get(rx_sc->sa[macsec_skb_cb(skb)->assoc_num]); if (!rx_sa) { /* 10.6.1 if the SA is not in use */ /* If validateFrames is Strict or the C bit in the * SecTAG is set, discard */ struct macsec_rx_sa *active_rx_sa = macsec_active_rxsa_get(rx_sc); if (hdr->tci_an & MACSEC_TCI_C || secy->validate_frames == MACSEC_VALIDATE_STRICT) { u64_stats_update_begin(&rxsc_stats->syncp); rxsc_stats->stats.InPktsNotUsingSA++; u64_stats_update_end(&rxsc_stats->syncp); DEV_STATS_INC(secy->netdev, rx_errors); if (active_rx_sa) this_cpu_inc(active_rx_sa->stats->InPktsNotUsingSA); goto drop_nosa; } /* not Strict, the frame (with the SecTAG and ICV * removed) is delivered to the Controlled Port. 
*/ u64_stats_update_begin(&rxsc_stats->syncp); rxsc_stats->stats.InPktsUnusedSA++; u64_stats_update_end(&rxsc_stats->syncp); if (active_rx_sa) this_cpu_inc(active_rx_sa->stats->InPktsUnusedSA); goto deliver; } /* First, PN check to avoid decrypting obviously wrong packets */ hdr_pn = ntohl(hdr->packet_number); if (secy->replay_protect) { bool late; spin_lock(&rx_sa->lock); late = rx_sa->next_pn_halves.lower >= secy->replay_window && hdr_pn < (rx_sa->next_pn_halves.lower - secy->replay_window); if (secy->xpn) late = late && pn_same_half(rx_sa->next_pn_halves.lower, hdr_pn); spin_unlock(&rx_sa->lock); if (late) { u64_stats_update_begin(&rxsc_stats->syncp); rxsc_stats->stats.InPktsLate++; u64_stats_update_end(&rxsc_stats->syncp); DEV_STATS_INC(macsec->secy.netdev, rx_dropped); goto drop; } } macsec_skb_cb(skb)->rx_sa = rx_sa; /* Disabled && !changed text => skip validation */ if (hdr->tci_an & MACSEC_TCI_C || secy->validate_frames != MACSEC_VALIDATE_DISABLED) skb = macsec_decrypt(skb, dev, rx_sa, sci, secy); if (IS_ERR(skb)) { /* the decrypt callback needs the reference */ if (PTR_ERR(skb) != -EINPROGRESS) { macsec_rxsa_put(rx_sa); macsec_rxsc_put(rx_sc); } rcu_read_unlock(); *pskb = NULL; return RX_HANDLER_CONSUMED; } if (!macsec_post_decrypt(skb, secy, hdr_pn)) goto drop; deliver: macsec_finalize_skb(skb, secy->icv_len, macsec_extra_len(macsec_skb_cb(skb)->has_sci)); len = skb->len; macsec_reset_skb(skb, secy->netdev); if (rx_sa) macsec_rxsa_put(rx_sa); macsec_rxsc_put(rx_sc); skb_orphan(skb); ret = gro_cells_receive(&macsec->gro_cells, skb); if (ret == NET_RX_SUCCESS) count_rx(dev, len); else DEV_STATS_INC(macsec->secy.netdev, rx_dropped); rcu_read_unlock(); *pskb = NULL; return RX_HANDLER_CONSUMED; drop: macsec_rxsa_put(rx_sa); drop_nosa: macsec_rxsc_put(rx_sc); rcu_read_unlock(); drop_direct: kfree_skb(skb); *pskb = NULL; return RX_HANDLER_CONSUMED; nosci: /* 10.6.1 if the SC is not found */ cbit = !!(hdr->tci_an & MACSEC_TCI_C); if (!cbit) macsec_finalize_skb(skb, MACSEC_DEFAULT_ICV_LEN, macsec_extra_len(macsec_skb_cb(skb)->has_sci)); list_for_each_entry_rcu(macsec, &rxd->secys, secys) { struct sk_buff *nskb; secy_stats = this_cpu_ptr(macsec->stats); /* If validateFrames is Strict or the C bit in the * SecTAG is set, discard */ if (cbit || macsec->secy.validate_frames == MACSEC_VALIDATE_STRICT) { u64_stats_update_begin(&secy_stats->syncp); secy_stats->stats.InPktsNoSCI++; u64_stats_update_end(&secy_stats->syncp); DEV_STATS_INC(macsec->secy.netdev, rx_errors); continue; } /* not strict, the frame (with the SecTAG and ICV * removed) is delivered to the Controlled Port. 
*/ nskb = skb_clone(skb, GFP_ATOMIC); if (!nskb) break; macsec_reset_skb(nskb, macsec->secy.netdev); ret = __netif_rx(nskb); if (ret == NET_RX_SUCCESS) { u64_stats_update_begin(&secy_stats->syncp); secy_stats->stats.InPktsUnknownSCI++; u64_stats_update_end(&secy_stats->syncp); } else { DEV_STATS_INC(macsec->secy.netdev, rx_dropped); } } rcu_read_unlock(); *pskb = skb; return RX_HANDLER_PASS; } static struct crypto_aead *macsec_alloc_tfm(char *key, int key_len, int icv_len) { struct crypto_aead *tfm; int ret; tfm = crypto_alloc_aead("gcm(aes)", 0, 0); if (IS_ERR(tfm)) return tfm; ret = crypto_aead_setkey(tfm, key, key_len); if (ret < 0) goto fail; ret = crypto_aead_setauthsize(tfm, icv_len); if (ret < 0) goto fail; return tfm; fail: crypto_free_aead(tfm); return ERR_PTR(ret); } static int init_rx_sa(struct macsec_rx_sa *rx_sa, char *sak, int key_len, int icv_len) { rx_sa->stats = alloc_percpu(struct macsec_rx_sa_stats); if (!rx_sa->stats) return -ENOMEM; rx_sa->key.tfm = macsec_alloc_tfm(sak, key_len, icv_len); if (IS_ERR(rx_sa->key.tfm)) { free_percpu(rx_sa->stats); return PTR_ERR(rx_sa->key.tfm); } rx_sa->ssci = MACSEC_UNDEF_SSCI; rx_sa->active = false; rx_sa->next_pn = 1; refcount_set(&rx_sa->refcnt, 1); spin_lock_init(&rx_sa->lock); return 0; } static void clear_rx_sa(struct macsec_rx_sa *rx_sa) { rx_sa->active = false; macsec_rxsa_put(rx_sa); } static void free_rx_sc(struct macsec_rx_sc *rx_sc) { int i; for (i = 0; i < MACSEC_NUM_AN; i++) { struct macsec_rx_sa *sa = rtnl_dereference(rx_sc->sa[i]); RCU_INIT_POINTER(rx_sc->sa[i], NULL); if (sa) clear_rx_sa(sa); } macsec_rxsc_put(rx_sc); } static struct macsec_rx_sc *del_rx_sc(struct macsec_secy *secy, sci_t sci) { struct macsec_rx_sc *rx_sc, __rcu **rx_scp; for (rx_scp = &secy->rx_sc, rx_sc = rtnl_dereference(*rx_scp); rx_sc; rx_scp = &rx_sc->next, rx_sc = rtnl_dereference(*rx_scp)) { if (rx_sc->sci == sci) { if (rx_sc->active) secy->n_rx_sc--; rcu_assign_pointer(*rx_scp, rx_sc->next); return rx_sc; } } return NULL; } static struct macsec_rx_sc *create_rx_sc(struct net_device *dev, sci_t sci, bool active) { struct macsec_rx_sc *rx_sc; struct macsec_dev *macsec; struct net_device *real_dev = macsec_priv(dev)->real_dev; struct macsec_rxh_data *rxd = macsec_data_rtnl(real_dev); struct macsec_secy *secy; list_for_each_entry(macsec, &rxd->secys, secys) { if (find_rx_sc_rtnl(&macsec->secy, sci)) return ERR_PTR(-EEXIST); } rx_sc = kzalloc(sizeof(*rx_sc), GFP_KERNEL); if (!rx_sc) return ERR_PTR(-ENOMEM); rx_sc->stats = netdev_alloc_pcpu_stats(struct pcpu_rx_sc_stats); if (!rx_sc->stats) { kfree(rx_sc); return ERR_PTR(-ENOMEM); } rx_sc->sci = sci; rx_sc->active = active; refcount_set(&rx_sc->refcnt, 1); secy = &macsec_priv(dev)->secy; rcu_assign_pointer(rx_sc->next, secy->rx_sc); rcu_assign_pointer(secy->rx_sc, rx_sc); if (rx_sc->active) secy->n_rx_sc++; return rx_sc; } static int init_tx_sa(struct macsec_tx_sa *tx_sa, char *sak, int key_len, int icv_len) { tx_sa->stats = alloc_percpu(struct macsec_tx_sa_stats); if (!tx_sa->stats) return -ENOMEM; tx_sa->key.tfm = macsec_alloc_tfm(sak, key_len, icv_len); if (IS_ERR(tx_sa->key.tfm)) { free_percpu(tx_sa->stats); return PTR_ERR(tx_sa->key.tfm); } tx_sa->ssci = MACSEC_UNDEF_SSCI; tx_sa->active = false; refcount_set(&tx_sa->refcnt, 1); spin_lock_init(&tx_sa->lock); return 0; } static void clear_tx_sa(struct macsec_tx_sa *tx_sa) { tx_sa->active = false; macsec_txsa_put(tx_sa); } static struct genl_family macsec_fam; static struct net_device *get_dev_from_nl(struct net *net, struct nlattr **attrs) { int 
ifindex = nla_get_u32(attrs[MACSEC_ATTR_IFINDEX]); struct net_device *dev; dev = __dev_get_by_index(net, ifindex); if (!dev) return ERR_PTR(-ENODEV); if (!netif_is_macsec(dev)) return ERR_PTR(-ENODEV); return dev; } static enum macsec_offload nla_get_offload(const struct nlattr *nla) { return (__force enum macsec_offload)nla_get_u8(nla); } static sci_t nla_get_sci(const struct nlattr *nla) { return (__force sci_t)nla_get_u64(nla); } static int nla_put_sci(struct sk_buff *skb, int attrtype, sci_t value, int padattr) { return nla_put_u64_64bit(skb, attrtype, (__force u64)value, padattr); } static ssci_t nla_get_ssci(const struct nlattr *nla) { return (__force ssci_t)nla_get_u32(nla); } static int nla_put_ssci(struct sk_buff *skb, int attrtype, ssci_t value) { return nla_put_u32(skb, attrtype, (__force u64)value); } static struct macsec_tx_sa *get_txsa_from_nl(struct net *net, struct nlattr **attrs, struct nlattr **tb_sa, struct net_device **devp, struct macsec_secy **secyp, struct macsec_tx_sc **scp, u8 *assoc_num) { struct net_device *dev; struct macsec_secy *secy; struct macsec_tx_sc *tx_sc; struct macsec_tx_sa *tx_sa; if (!tb_sa[MACSEC_SA_ATTR_AN]) return ERR_PTR(-EINVAL); *assoc_num = nla_get_u8(tb_sa[MACSEC_SA_ATTR_AN]); dev = get_dev_from_nl(net, attrs); if (IS_ERR(dev)) return ERR_CAST(dev); if (*assoc_num >= MACSEC_NUM_AN) return ERR_PTR(-EINVAL); secy = &macsec_priv(dev)->secy; tx_sc = &secy->tx_sc; tx_sa = rtnl_dereference(tx_sc->sa[*assoc_num]); if (!tx_sa) return ERR_PTR(-ENODEV); *devp = dev; *scp = tx_sc; *secyp = secy; return tx_sa; } static struct macsec_rx_sc *get_rxsc_from_nl(struct net *net, struct nlattr **attrs, struct nlattr **tb_rxsc, struct net_device **devp, struct macsec_secy **secyp) { struct net_device *dev; struct macsec_secy *secy; struct macsec_rx_sc *rx_sc; sci_t sci; dev = get_dev_from_nl(net, attrs); if (IS_ERR(dev)) return ERR_CAST(dev); secy = &macsec_priv(dev)->secy; if (!tb_rxsc[MACSEC_RXSC_ATTR_SCI]) return ERR_PTR(-EINVAL); sci = nla_get_sci(tb_rxsc[MACSEC_RXSC_ATTR_SCI]); rx_sc = find_rx_sc_rtnl(secy, sci); if (!rx_sc) return ERR_PTR(-ENODEV); *secyp = secy; *devp = dev; return rx_sc; } static struct macsec_rx_sa *get_rxsa_from_nl(struct net *net, struct nlattr **attrs, struct nlattr **tb_rxsc, struct nlattr **tb_sa, struct net_device **devp, struct macsec_secy **secyp, struct macsec_rx_sc **scp, u8 *assoc_num) { struct macsec_rx_sc *rx_sc; struct macsec_rx_sa *rx_sa; if (!tb_sa[MACSEC_SA_ATTR_AN]) return ERR_PTR(-EINVAL); *assoc_num = nla_get_u8(tb_sa[MACSEC_SA_ATTR_AN]); if (*assoc_num >= MACSEC_NUM_AN) return ERR_PTR(-EINVAL); rx_sc = get_rxsc_from_nl(net, attrs, tb_rxsc, devp, secyp); if (IS_ERR(rx_sc)) return ERR_CAST(rx_sc); rx_sa = rtnl_dereference(rx_sc->sa[*assoc_num]); if (!rx_sa) return ERR_PTR(-ENODEV); *scp = rx_sc; return rx_sa; } static const struct nla_policy macsec_genl_policy[NUM_MACSEC_ATTR] = { [MACSEC_ATTR_IFINDEX] = { .type = NLA_U32 }, [MACSEC_ATTR_RXSC_CONFIG] = { .type = NLA_NESTED }, [MACSEC_ATTR_SA_CONFIG] = { .type = NLA_NESTED }, [MACSEC_ATTR_OFFLOAD] = { .type = NLA_NESTED }, }; static const struct nla_policy macsec_genl_rxsc_policy[NUM_MACSEC_RXSC_ATTR] = { [MACSEC_RXSC_ATTR_SCI] = { .type = NLA_U64 }, [MACSEC_RXSC_ATTR_ACTIVE] = { .type = NLA_U8 }, }; static const struct nla_policy macsec_genl_sa_policy[NUM_MACSEC_SA_ATTR] = { [MACSEC_SA_ATTR_AN] = { .type = NLA_U8 }, [MACSEC_SA_ATTR_ACTIVE] = { .type = NLA_U8 }, [MACSEC_SA_ATTR_PN] = NLA_POLICY_MIN_LEN(4), [MACSEC_SA_ATTR_KEYID] = { .type = NLA_BINARY, .len = 
MACSEC_KEYID_LEN, }, [MACSEC_SA_ATTR_KEY] = { .type = NLA_BINARY, .len = MACSEC_MAX_KEY_LEN, }, [MACSEC_SA_ATTR_SSCI] = { .type = NLA_U32 }, [MACSEC_SA_ATTR_SALT] = { .type = NLA_BINARY, .len = MACSEC_SALT_LEN, }, }; static const struct nla_policy macsec_genl_offload_policy[NUM_MACSEC_OFFLOAD_ATTR] = { [MACSEC_OFFLOAD_ATTR_TYPE] = { .type = NLA_U8 }, }; /* Offloads an operation to a device driver */ static int macsec_offload(int (* const func)(struct macsec_context *), struct macsec_context *ctx) { int ret; if (unlikely(!func)) return 0; if (ctx->offload == MACSEC_OFFLOAD_PHY) mutex_lock(&ctx->phydev->lock); ret = (*func)(ctx); if (ctx->offload == MACSEC_OFFLOAD_PHY) mutex_unlock(&ctx->phydev->lock); return ret; } static int parse_sa_config(struct nlattr **attrs, struct nlattr **tb_sa) { if (!attrs[MACSEC_ATTR_SA_CONFIG]) return -EINVAL; if (nla_parse_nested_deprecated(tb_sa, MACSEC_SA_ATTR_MAX, attrs[MACSEC_ATTR_SA_CONFIG], macsec_genl_sa_policy, NULL)) return -EINVAL; return 0; } static int parse_rxsc_config(struct nlattr **attrs, struct nlattr **tb_rxsc) { if (!attrs[MACSEC_ATTR_RXSC_CONFIG]) return -EINVAL; if (nla_parse_nested_deprecated(tb_rxsc, MACSEC_RXSC_ATTR_MAX, attrs[MACSEC_ATTR_RXSC_CONFIG], macsec_genl_rxsc_policy, NULL)) return -EINVAL; return 0; } static bool validate_add_rxsa(struct nlattr **attrs) { if (!attrs[MACSEC_SA_ATTR_AN] || !attrs[MACSEC_SA_ATTR_KEY] || !attrs[MACSEC_SA_ATTR_KEYID]) return false; if (nla_get_u8(attrs[MACSEC_SA_ATTR_AN]) >= MACSEC_NUM_AN) return false; if (attrs[MACSEC_SA_ATTR_PN] && nla_get_u64(attrs[MACSEC_SA_ATTR_PN]) == 0) return false; if (attrs[MACSEC_SA_ATTR_ACTIVE]) { if (nla_get_u8(attrs[MACSEC_SA_ATTR_ACTIVE]) > 1) return false; } if (nla_len(attrs[MACSEC_SA_ATTR_KEYID]) != MACSEC_KEYID_LEN) return false; return true; } static int macsec_add_rxsa(struct sk_buff *skb, struct genl_info *info) { struct net_device *dev; struct nlattr **attrs = info->attrs; struct macsec_secy *secy; struct macsec_rx_sc *rx_sc; struct macsec_rx_sa *rx_sa; unsigned char assoc_num; int pn_len; struct nlattr *tb_rxsc[MACSEC_RXSC_ATTR_MAX + 1]; struct nlattr *tb_sa[MACSEC_SA_ATTR_MAX + 1]; int err; if (!attrs[MACSEC_ATTR_IFINDEX]) return -EINVAL; if (parse_sa_config(attrs, tb_sa)) return -EINVAL; if (parse_rxsc_config(attrs, tb_rxsc)) return -EINVAL; if (!validate_add_rxsa(tb_sa)) return -EINVAL; rtnl_lock(); rx_sc = get_rxsc_from_nl(genl_info_net(info), attrs, tb_rxsc, &dev, &secy); if (IS_ERR(rx_sc)) { rtnl_unlock(); return PTR_ERR(rx_sc); } assoc_num = nla_get_u8(tb_sa[MACSEC_SA_ATTR_AN]); if (nla_len(tb_sa[MACSEC_SA_ATTR_KEY]) != secy->key_len) { pr_notice("macsec: nl: add_rxsa: bad key length: %d != %d\n", nla_len(tb_sa[MACSEC_SA_ATTR_KEY]), secy->key_len); rtnl_unlock(); return -EINVAL; } pn_len = secy->xpn ? 
MACSEC_XPN_PN_LEN : MACSEC_DEFAULT_PN_LEN; if (tb_sa[MACSEC_SA_ATTR_PN] && nla_len(tb_sa[MACSEC_SA_ATTR_PN]) != pn_len) { pr_notice("macsec: nl: add_rxsa: bad pn length: %d != %d\n", nla_len(tb_sa[MACSEC_SA_ATTR_PN]), pn_len); rtnl_unlock(); return -EINVAL; } if (secy->xpn) { if (!tb_sa[MACSEC_SA_ATTR_SSCI] || !tb_sa[MACSEC_SA_ATTR_SALT]) { rtnl_unlock(); return -EINVAL; } if (nla_len(tb_sa[MACSEC_SA_ATTR_SALT]) != MACSEC_SALT_LEN) { pr_notice("macsec: nl: add_rxsa: bad salt length: %d != %d\n", nla_len(tb_sa[MACSEC_SA_ATTR_SALT]), MACSEC_SALT_LEN); rtnl_unlock(); return -EINVAL; } } rx_sa = rtnl_dereference(rx_sc->sa[assoc_num]); if (rx_sa) { rtnl_unlock(); return -EBUSY; } rx_sa = kmalloc(sizeof(*rx_sa), GFP_KERNEL); if (!rx_sa) { rtnl_unlock(); return -ENOMEM; } err = init_rx_sa(rx_sa, nla_data(tb_sa[MACSEC_SA_ATTR_KEY]), secy->key_len, secy->icv_len); if (err < 0) { kfree(rx_sa); rtnl_unlock(); return err; } if (tb_sa[MACSEC_SA_ATTR_PN]) { spin_lock_bh(&rx_sa->lock); rx_sa->next_pn = nla_get_u64(tb_sa[MACSEC_SA_ATTR_PN]); spin_unlock_bh(&rx_sa->lock); } if (tb_sa[MACSEC_SA_ATTR_ACTIVE]) rx_sa->active = !!nla_get_u8(tb_sa[MACSEC_SA_ATTR_ACTIVE]); rx_sa->sc = rx_sc; if (secy->xpn) { rx_sa->ssci = nla_get_ssci(tb_sa[MACSEC_SA_ATTR_SSCI]); nla_memcpy(rx_sa->key.salt.bytes, tb_sa[MACSEC_SA_ATTR_SALT], MACSEC_SALT_LEN); } /* If h/w offloading is available, propagate to the device */ if (macsec_is_offloaded(netdev_priv(dev))) { const struct macsec_ops *ops; struct macsec_context ctx; ops = macsec_get_ops(netdev_priv(dev), &ctx); if (!ops) { err = -EOPNOTSUPP; goto cleanup; } ctx.sa.assoc_num = assoc_num; ctx.sa.rx_sa = rx_sa; ctx.secy = secy; memcpy(ctx.sa.key, nla_data(tb_sa[MACSEC_SA_ATTR_KEY]), secy->key_len); err = macsec_offload(ops->mdo_add_rxsa, &ctx); memzero_explicit(ctx.sa.key, secy->key_len); if (err) goto cleanup; } nla_memcpy(rx_sa->key.id, tb_sa[MACSEC_SA_ATTR_KEYID], MACSEC_KEYID_LEN); rcu_assign_pointer(rx_sc->sa[assoc_num], rx_sa); rtnl_unlock(); return 0; cleanup: macsec_rxsa_put(rx_sa); rtnl_unlock(); return err; } static bool validate_add_rxsc(struct nlattr **attrs) { if (!attrs[MACSEC_RXSC_ATTR_SCI]) return false; if (attrs[MACSEC_RXSC_ATTR_ACTIVE]) { if (nla_get_u8(attrs[MACSEC_RXSC_ATTR_ACTIVE]) > 1) return false; } return true; } static int macsec_add_rxsc(struct sk_buff *skb, struct genl_info *info) { struct net_device *dev; sci_t sci = MACSEC_UNDEF_SCI; struct nlattr **attrs = info->attrs; struct macsec_rx_sc *rx_sc; struct nlattr *tb_rxsc[MACSEC_RXSC_ATTR_MAX + 1]; struct macsec_secy *secy; bool active = true; int ret; if (!attrs[MACSEC_ATTR_IFINDEX]) return -EINVAL; if (parse_rxsc_config(attrs, tb_rxsc)) return -EINVAL; if (!validate_add_rxsc(tb_rxsc)) return -EINVAL; rtnl_lock(); dev = get_dev_from_nl(genl_info_net(info), attrs); if (IS_ERR(dev)) { rtnl_unlock(); return PTR_ERR(dev); } secy = &macsec_priv(dev)->secy; sci = nla_get_sci(tb_rxsc[MACSEC_RXSC_ATTR_SCI]); if (tb_rxsc[MACSEC_RXSC_ATTR_ACTIVE]) active = nla_get_u8(tb_rxsc[MACSEC_RXSC_ATTR_ACTIVE]); rx_sc = create_rx_sc(dev, sci, active); if (IS_ERR(rx_sc)) { rtnl_unlock(); return PTR_ERR(rx_sc); } if (macsec_is_offloaded(netdev_priv(dev))) { const struct macsec_ops *ops; struct macsec_context ctx; ops = macsec_get_ops(netdev_priv(dev), &ctx); if (!ops) { ret = -EOPNOTSUPP; goto cleanup; } ctx.rx_sc = rx_sc; ctx.secy = secy; ret = macsec_offload(ops->mdo_add_rxsc, &ctx); if (ret) goto cleanup; } rtnl_unlock(); return 0; cleanup: del_rx_sc(secy, sci); free_rx_sc(rx_sc); rtnl_unlock(); return ret; } static 
bool validate_add_txsa(struct nlattr **attrs)
{
	if (!attrs[MACSEC_SA_ATTR_AN] ||
	    !attrs[MACSEC_SA_ATTR_PN] ||
	    !attrs[MACSEC_SA_ATTR_KEY] ||
	    !attrs[MACSEC_SA_ATTR_KEYID])
		return false;

	if (nla_get_u8(attrs[MACSEC_SA_ATTR_AN]) >= MACSEC_NUM_AN)
		return false;

	if (nla_get_u64(attrs[MACSEC_SA_ATTR_PN]) == 0)
		return false;

	if (attrs[MACSEC_SA_ATTR_ACTIVE]) {
		if (nla_get_u8(attrs[MACSEC_SA_ATTR_ACTIVE]) > 1)
			return false;
	}

	if (nla_len(attrs[MACSEC_SA_ATTR_KEYID]) != MACSEC_KEYID_LEN)
		return false;

	return true;
}

static int macsec_add_txsa(struct sk_buff *skb, struct genl_info *info)
{
	struct net_device *dev;
	struct nlattr **attrs = info->attrs;
	struct macsec_secy *secy;
	struct macsec_tx_sc *tx_sc;
	struct macsec_tx_sa *tx_sa;
	unsigned char assoc_num;
	int pn_len;
	struct nlattr *tb_sa[MACSEC_SA_ATTR_MAX + 1];
	bool was_operational;
	int err;

	if (!attrs[MACSEC_ATTR_IFINDEX])
		return -EINVAL;

	if (parse_sa_config(attrs, tb_sa))
		return -EINVAL;

	if (!validate_add_txsa(tb_sa))
		return -EINVAL;

	rtnl_lock();
	dev = get_dev_from_nl(genl_info_net(info), attrs);
	if (IS_ERR(dev)) {
		rtnl_unlock();
		return PTR_ERR(dev);
	}

	secy = &macsec_priv(dev)->secy;
	tx_sc = &secy->tx_sc;

	assoc_num = nla_get_u8(tb_sa[MACSEC_SA_ATTR_AN]);

	if (nla_len(tb_sa[MACSEC_SA_ATTR_KEY]) != secy->key_len) {
		pr_notice("macsec: nl: add_txsa: bad key length: %d != %d\n",
			  nla_len(tb_sa[MACSEC_SA_ATTR_KEY]), secy->key_len);
		rtnl_unlock();
		return -EINVAL;
	}

	pn_len = secy->xpn ? MACSEC_XPN_PN_LEN : MACSEC_DEFAULT_PN_LEN;
	if (nla_len(tb_sa[MACSEC_SA_ATTR_PN]) != pn_len) {
		pr_notice("macsec: nl: add_txsa: bad pn length: %d != %d\n",
			  nla_len(tb_sa[MACSEC_SA_ATTR_PN]), pn_len);
		rtnl_unlock();
		return -EINVAL;
	}

	if (secy->xpn) {
		if (!tb_sa[MACSEC_SA_ATTR_SSCI] || !tb_sa[MACSEC_SA_ATTR_SALT]) {
			rtnl_unlock();
			return -EINVAL;
		}

		if (nla_len(tb_sa[MACSEC_SA_ATTR_SALT]) != MACSEC_SALT_LEN) {
			pr_notice("macsec: nl: add_txsa: bad salt length: %d != %d\n",
				  nla_len(tb_sa[MACSEC_SA_ATTR_SALT]),
				  MACSEC_SALT_LEN);
			rtnl_unlock();
			return -EINVAL;
		}
	}

	tx_sa = rtnl_dereference(tx_sc->sa[assoc_num]);
	if (tx_sa) {
		rtnl_unlock();
		return -EBUSY;
	}

	tx_sa = kmalloc(sizeof(*tx_sa), GFP_KERNEL);
	if (!tx_sa) {
		rtnl_unlock();
		return -ENOMEM;
	}

	err = init_tx_sa(tx_sa, nla_data(tb_sa[MACSEC_SA_ATTR_KEY]),
			 secy->key_len, secy->icv_len);
	if (err < 0) {
		kfree(tx_sa);
		rtnl_unlock();
		return err;
	}

	spin_lock_bh(&tx_sa->lock);
	tx_sa->next_pn = nla_get_u64(tb_sa[MACSEC_SA_ATTR_PN]);
	spin_unlock_bh(&tx_sa->lock);

	if (tb_sa[MACSEC_SA_ATTR_ACTIVE])
		tx_sa->active = !!nla_get_u8(tb_sa[MACSEC_SA_ATTR_ACTIVE]);

	was_operational = secy->operational;
	if (assoc_num == tx_sc->encoding_sa && tx_sa->active)
		secy->operational = true;

	if (secy->xpn) {
		tx_sa->ssci = nla_get_ssci(tb_sa[MACSEC_SA_ATTR_SSCI]);
		nla_memcpy(tx_sa->key.salt.bytes, tb_sa[MACSEC_SA_ATTR_SALT],
			   MACSEC_SALT_LEN);
	}

	/* If h/w offloading is available, propagate to the device */
	if (macsec_is_offloaded(netdev_priv(dev))) {
		const struct macsec_ops *ops;
		struct macsec_context ctx;

		ops = macsec_get_ops(netdev_priv(dev), &ctx);
		if (!ops) {
			err = -EOPNOTSUPP;
			goto cleanup;
		}

		ctx.sa.assoc_num = assoc_num;
		ctx.sa.tx_sa = tx_sa;
		ctx.secy = secy;
		memcpy(ctx.sa.key, nla_data(tb_sa[MACSEC_SA_ATTR_KEY]),
		       secy->key_len);

		err = macsec_offload(ops->mdo_add_txsa, &ctx);
		memzero_explicit(ctx.sa.key, secy->key_len);
		if (err)
			goto cleanup;
	}

	nla_memcpy(tx_sa->key.id, tb_sa[MACSEC_SA_ATTR_KEYID], MACSEC_KEYID_LEN);
	rcu_assign_pointer(tx_sc->sa[assoc_num], tx_sa);

	rtnl_unlock();

	return 0;

cleanup:
	secy->operational = was_operational;
	macsec_txsa_put(tx_sa);
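	/* The offload attempt failed: the operational flag was restored and
	 * the SA reference dropped above, so all that is left is to release
	 * the RTNL lock and report the error to the netlink caller.
	 */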
	rtnl_unlock();
	return err;
}

static int macsec_del_rxsa(struct sk_buff *skb, struct genl_info *info)
{
	struct nlattr **attrs = info->attrs;
	struct net_device *dev;
	struct macsec_secy *secy;
	struct macsec_rx_sc *rx_sc;
	struct macsec_rx_sa *rx_sa;
	u8 assoc_num;
	struct nlattr *tb_rxsc[MACSEC_RXSC_ATTR_MAX + 1];
	struct nlattr *tb_sa[MACSEC_SA_ATTR_MAX + 1];
	int ret;

	if (!attrs[MACSEC_ATTR_IFINDEX])
		return -EINVAL;

	if (parse_sa_config(attrs, tb_sa))
		return -EINVAL;

	if (parse_rxsc_config(attrs, tb_rxsc))
		return -EINVAL;

	rtnl_lock();
	rx_sa = get_rxsa_from_nl(genl_info_net(info), attrs, tb_rxsc, tb_sa,
				 &dev, &secy, &rx_sc, &assoc_num);
	if (IS_ERR(rx_sa)) {
		rtnl_unlock();
		return PTR_ERR(rx_sa);
	}

	/* an SA that is still marked active cannot be deleted */
	if (rx_sa->active) {
		rtnl_unlock();
		return -EBUSY;
	}

	/* If h/w offloading is available, propagate to the device */
	if (macsec_is_offloaded(netdev_priv(dev))) {
		const struct macsec_ops *ops;
		struct macsec_context ctx;

		ops = macsec_get_ops(netdev_priv(dev), &ctx);
		if (!ops) {
			ret = -EOPNOTSUPP;
			goto cleanup;
		}

		ctx.sa.assoc_num = assoc_num;
		ctx.sa.rx_sa = rx_sa;
		ctx.secy = secy;
		ret = macsec_offload(ops->mdo_del_rxsa, &ctx);
		if (ret)
			goto cleanup;
	}

	RCU_INIT_POINTER(rx_sc->sa[assoc_num], NULL);
	clear_rx_sa(rx_sa);

	rtnl_unlock();

	return 0;

cleanup:
	rtnl_unlock();
	return ret;
}

static int macsec_del_rxsc(struct sk_buff *skb, struct genl_info *info)
{
	struct nlattr **attrs = info->attrs;
	struct net_device *dev;
	struct macsec_secy *secy;
	struct macsec_rx_sc *rx_sc;
	sci_t sci;
	struct nlattr *tb_rxsc[MACSEC_RXSC_ATTR_MAX + 1];
	int ret;

	if (!attrs[MACSEC_ATTR_IFINDEX])
		return -EINVAL;

	if (parse_rxsc_config(attrs, tb_rxsc))
		return -EINVAL;

	if (!tb_rxsc[MACSEC_RXSC_ATTR_SCI])
		return -EINVAL;

	rtnl_lock();
	dev = get_dev_from_nl(genl_info_net(info), info->attrs);
	if (IS_ERR(dev)) {
		rtnl_unlock();
		return PTR_ERR(dev);
	}

	secy = &macsec_priv(dev)->secy;
	sci = nla_get_sci(tb_rxsc[MACSEC_RXSC_ATTR_SCI]);

	rx_sc = del_rx_sc(secy, sci);
	if (!rx_sc) {
		rtnl_unlock();
		return -ENODEV;
	}

	/* If h/w offloading is available, propagate to the device */
	if (macsec_is_offloaded(netdev_priv(dev))) {
		const struct macsec_ops *ops;
		struct macsec_context ctx;

		ops = macsec_get_ops(netdev_priv(dev), &ctx);
		if (!ops) {
			ret = -EOPNOTSUPP;
			goto cleanup;
		}

		ctx.rx_sc = rx_sc;
		ctx.secy = secy;
		ret = macsec_offload(ops->mdo_del_rxsc, &ctx);
		if (ret)
			goto cleanup;
	}

	free_rx_sc(rx_sc);
	rtnl_unlock();

	return 0;

cleanup:
	rtnl_unlock();
	return ret;
}

static int macsec_del_txsa(struct sk_buff *skb, struct genl_info *info)
{
	struct nlattr **attrs = info->attrs;
	struct net_device *dev;
	struct macsec_secy *secy;
	struct macsec_tx_sc *tx_sc;
	struct macsec_tx_sa *tx_sa;
	u8 assoc_num;
	struct nlattr *tb_sa[MACSEC_SA_ATTR_MAX + 1];
	int ret;

	if (!attrs[MACSEC_ATTR_IFINDEX])
		return -EINVAL;

	if (parse_sa_config(attrs, tb_sa))
		return -EINVAL;

	rtnl_lock();
	tx_sa = get_txsa_from_nl(genl_info_net(info), attrs, tb_sa,
				 &dev, &secy, &tx_sc, &assoc_num);
	if (IS_ERR(tx_sa)) {
		rtnl_unlock();
		return PTR_ERR(tx_sa);
	}

	/* an SA that is still marked active cannot be deleted */
	if (tx_sa->active) {
		rtnl_unlock();
		return -EBUSY;
	}

	/* If h/w offloading is available, propagate to the device */
	if (macsec_is_offloaded(netdev_priv(dev))) {
		const struct macsec_ops *ops;
		struct macsec_context ctx;

		ops = macsec_get_ops(netdev_priv(dev), &ctx);
		if (!ops) {
			ret = -EOPNOTSUPP;
			goto cleanup;
		}

		ctx.sa.assoc_num = assoc_num;
		ctx.sa.tx_sa = tx_sa;
		ctx.secy = secy;
		ret = macsec_offload(ops->mdo_del_txsa, &ctx);
		if (ret)
			goto cleanup;
	}

	RCU_INIT_POINTER(tx_sc->sa[assoc_num], NULL);
	clear_tx_sa(tx_sa);

	rtnl_unlock();

	return 0;

cleanup:
	rtnl_unlock();
	return
ret; } static bool validate_upd_sa(struct nlattr **attrs) { if (!attrs[MACSEC_SA_ATTR_AN] || attrs[MACSEC_SA_ATTR_KEY] || attrs[MACSEC_SA_ATTR_KEYID] || attrs[MACSEC_SA_ATTR_SSCI] || attrs[MACSEC_SA_ATTR_SALT]) return false; if (nla_get_u8(attrs[MACSEC_SA_ATTR_AN]) >= MACSEC_NUM_AN) return false; if (attrs[MACSEC_SA_ATTR_PN] && nla_get_u64(attrs[MACSEC_SA_ATTR_PN]) == 0) return false; if (attrs[MACSEC_SA_ATTR_ACTIVE]) { if (nla_get_u8(attrs[MACSEC_SA_ATTR_ACTIVE]) > 1) return false; } return true; } static int macsec_upd_txsa(struct sk_buff *skb, struct genl_info *info) { struct nlattr **attrs = info->attrs; struct net_device *dev; struct macsec_secy *secy; struct macsec_tx_sc *tx_sc; struct macsec_tx_sa *tx_sa; u8 assoc_num; struct nlattr *tb_sa[MACSEC_SA_ATTR_MAX + 1]; bool was_operational, was_active; pn_t prev_pn; int ret = 0; prev_pn.full64 = 0; if (!attrs[MACSEC_ATTR_IFINDEX]) return -EINVAL; if (parse_sa_config(attrs, tb_sa)) return -EINVAL; if (!validate_upd_sa(tb_sa)) return -EINVAL; rtnl_lock(); tx_sa = get_txsa_from_nl(genl_info_net(info), attrs, tb_sa, &dev, &secy, &tx_sc, &assoc_num); if (IS_ERR(tx_sa)) { rtnl_unlock(); return PTR_ERR(tx_sa); } if (tb_sa[MACSEC_SA_ATTR_PN]) { int pn_len; pn_len = secy->xpn ? MACSEC_XPN_PN_LEN : MACSEC_DEFAULT_PN_LEN; if (nla_len(tb_sa[MACSEC_SA_ATTR_PN]) != pn_len) { pr_notice("macsec: nl: upd_txsa: bad pn length: %d != %d\n", nla_len(tb_sa[MACSEC_SA_ATTR_PN]), pn_len); rtnl_unlock(); return -EINVAL; } spin_lock_bh(&tx_sa->lock); prev_pn = tx_sa->next_pn_halves; tx_sa->next_pn = nla_get_u64(tb_sa[MACSEC_SA_ATTR_PN]); spin_unlock_bh(&tx_sa->lock); } was_active = tx_sa->active; if (tb_sa[MACSEC_SA_ATTR_ACTIVE]) tx_sa->active = nla_get_u8(tb_sa[MACSEC_SA_ATTR_ACTIVE]); was_operational = secy->operational; if (assoc_num == tx_sc->encoding_sa) secy->operational = tx_sa->active; /* If h/w offloading is available, propagate to the device */ if (macsec_is_offloaded(netdev_priv(dev))) { const struct macsec_ops *ops; struct macsec_context ctx; ops = macsec_get_ops(netdev_priv(dev), &ctx); if (!ops) { ret = -EOPNOTSUPP; goto cleanup; } ctx.sa.assoc_num = assoc_num; ctx.sa.tx_sa = tx_sa; ctx.secy = secy; ret = macsec_offload(ops->mdo_upd_txsa, &ctx); if (ret) goto cleanup; } rtnl_unlock(); return 0; cleanup: if (tb_sa[MACSEC_SA_ATTR_PN]) { spin_lock_bh(&tx_sa->lock); tx_sa->next_pn_halves = prev_pn; spin_unlock_bh(&tx_sa->lock); } tx_sa->active = was_active; secy->operational = was_operational; rtnl_unlock(); return ret; } static int macsec_upd_rxsa(struct sk_buff *skb, struct genl_info *info) { struct nlattr **attrs = info->attrs; struct net_device *dev; struct macsec_secy *secy; struct macsec_rx_sc *rx_sc; struct macsec_rx_sa *rx_sa; u8 assoc_num; struct nlattr *tb_rxsc[MACSEC_RXSC_ATTR_MAX + 1]; struct nlattr *tb_sa[MACSEC_SA_ATTR_MAX + 1]; bool was_active; pn_t prev_pn; int ret = 0; prev_pn.full64 = 0; if (!attrs[MACSEC_ATTR_IFINDEX]) return -EINVAL; if (parse_rxsc_config(attrs, tb_rxsc)) return -EINVAL; if (parse_sa_config(attrs, tb_sa)) return -EINVAL; if (!validate_upd_sa(tb_sa)) return -EINVAL; rtnl_lock(); rx_sa = get_rxsa_from_nl(genl_info_net(info), attrs, tb_rxsc, tb_sa, &dev, &secy, &rx_sc, &assoc_num); if (IS_ERR(rx_sa)) { rtnl_unlock(); return PTR_ERR(rx_sa); } if (tb_sa[MACSEC_SA_ATTR_PN]) { int pn_len; pn_len = secy->xpn ? 
MACSEC_XPN_PN_LEN : MACSEC_DEFAULT_PN_LEN; if (nla_len(tb_sa[MACSEC_SA_ATTR_PN]) != pn_len) { pr_notice("macsec: nl: upd_rxsa: bad pn length: %d != %d\n", nla_len(tb_sa[MACSEC_SA_ATTR_PN]), pn_len); rtnl_unlock(); return -EINVAL; } spin_lock_bh(&rx_sa->lock); prev_pn = rx_sa->next_pn_halves; rx_sa->next_pn = nla_get_u64(tb_sa[MACSEC_SA_ATTR_PN]); spin_unlock_bh(&rx_sa->lock); } was_active = rx_sa->active; if (tb_sa[MACSEC_SA_ATTR_ACTIVE]) rx_sa->active = nla_get_u8(tb_sa[MACSEC_SA_ATTR_ACTIVE]); /* If h/w offloading is available, propagate to the device */ if (macsec_is_offloaded(netdev_priv(dev))) { const struct macsec_ops *ops; struct macsec_context ctx; ops = macsec_get_ops(netdev_priv(dev), &ctx); if (!ops) { ret = -EOPNOTSUPP; goto cleanup; } ctx.sa.assoc_num = assoc_num; ctx.sa.rx_sa = rx_sa; ctx.secy = secy; ret = macsec_offload(ops->mdo_upd_rxsa, &ctx); if (ret) goto cleanup; } rtnl_unlock(); return 0; cleanup: if (tb_sa[MACSEC_SA_ATTR_PN]) { spin_lock_bh(&rx_sa->lock); rx_sa->next_pn_halves = prev_pn; spin_unlock_bh(&rx_sa->lock); } rx_sa->active = was_active; rtnl_unlock(); return ret; } static int macsec_upd_rxsc(struct sk_buff *skb, struct genl_info *info) { struct nlattr **attrs = info->attrs; struct net_device *dev; struct macsec_secy *secy; struct macsec_rx_sc *rx_sc; struct nlattr *tb_rxsc[MACSEC_RXSC_ATTR_MAX + 1]; unsigned int prev_n_rx_sc; bool was_active; int ret; if (!attrs[MACSEC_ATTR_IFINDEX]) return -EINVAL; if (parse_rxsc_config(attrs, tb_rxsc)) return -EINVAL; if (!validate_add_rxsc(tb_rxsc)) return -EINVAL; rtnl_lock(); rx_sc = get_rxsc_from_nl(genl_info_net(info), attrs, tb_rxsc, &dev, &secy); if (IS_ERR(rx_sc)) { rtnl_unlock(); return PTR_ERR(rx_sc); } was_active = rx_sc->active; prev_n_rx_sc = secy->n_rx_sc; if (tb_rxsc[MACSEC_RXSC_ATTR_ACTIVE]) { bool new = !!nla_get_u8(tb_rxsc[MACSEC_RXSC_ATTR_ACTIVE]); if (rx_sc->active != new) secy->n_rx_sc += new ? 1 : -1; rx_sc->active = new; } /* If h/w offloading is available, propagate to the device */ if (macsec_is_offloaded(netdev_priv(dev))) { const struct macsec_ops *ops; struct macsec_context ctx; ops = macsec_get_ops(netdev_priv(dev), &ctx); if (!ops) { ret = -EOPNOTSUPP; goto cleanup; } ctx.rx_sc = rx_sc; ctx.secy = secy; ret = macsec_offload(ops->mdo_upd_rxsc, &ctx); if (ret) goto cleanup; } rtnl_unlock(); return 0; cleanup: secy->n_rx_sc = prev_n_rx_sc; rx_sc->active = was_active; rtnl_unlock(); return ret; } static bool macsec_is_configured(struct macsec_dev *macsec) { struct macsec_secy *secy = &macsec->secy; struct macsec_tx_sc *tx_sc = &secy->tx_sc; int i; if (secy->rx_sc) return true; for (i = 0; i < MACSEC_NUM_AN; i++) if (tx_sc->sa[i]) return true; return false; } static int macsec_update_offload(struct net_device *dev, enum macsec_offload offload) { enum macsec_offload prev_offload; const struct macsec_ops *ops; struct macsec_context ctx; struct macsec_dev *macsec; int ret = 0; macsec = macsec_priv(dev); /* Check if the offloading mode is supported by the underlying layers */ if (offload != MACSEC_OFFLOAD_OFF && !macsec_check_offload(offload, macsec)) return -EOPNOTSUPP; /* Check if the net device is busy. */ if (netif_running(dev)) return -EBUSY; /* Check if the device already has rules configured: we do not support * rules migration. */ if (macsec_is_configured(macsec)) return -EBUSY; prev_offload = macsec->offload; ops = __macsec_get_ops(offload == MACSEC_OFFLOAD_OFF ? 
prev_offload : offload, macsec, &ctx); if (!ops) return -EOPNOTSUPP; macsec->offload = offload; ctx.secy = &macsec->secy; ret = offload == MACSEC_OFFLOAD_OFF ? macsec_offload(ops->mdo_del_secy, &ctx) : macsec_offload(ops->mdo_add_secy, &ctx); if (ret) macsec->offload = prev_offload; return ret; } static int macsec_upd_offload(struct sk_buff *skb, struct genl_info *info) { struct nlattr *tb_offload[MACSEC_OFFLOAD_ATTR_MAX + 1]; struct nlattr **attrs = info->attrs; enum macsec_offload offload; struct macsec_dev *macsec; struct net_device *dev; int ret = 0; if (!attrs[MACSEC_ATTR_IFINDEX]) return -EINVAL; if (!attrs[MACSEC_ATTR_OFFLOAD]) return -EINVAL; if (nla_parse_nested_deprecated(tb_offload, MACSEC_OFFLOAD_ATTR_MAX, attrs[MACSEC_ATTR_OFFLOAD], macsec_genl_offload_policy, NULL)) return -EINVAL; rtnl_lock(); dev = get_dev_from_nl(genl_info_net(info), attrs); if (IS_ERR(dev)) { ret = PTR_ERR(dev); goto out; } macsec = macsec_priv(dev); if (!tb_offload[MACSEC_OFFLOAD_ATTR_TYPE]) { ret = -EINVAL; goto out; } offload = nla_get_u8(tb_offload[MACSEC_OFFLOAD_ATTR_TYPE]); if (macsec->offload != offload) ret = macsec_update_offload(dev, offload); out: rtnl_unlock(); return ret; } static void get_tx_sa_stats(struct net_device *dev, int an, struct macsec_tx_sa *tx_sa, struct macsec_tx_sa_stats *sum) { struct macsec_dev *macsec = macsec_priv(dev); int cpu; /* If h/w offloading is available, propagate to the device */ if (macsec_is_offloaded(macsec)) { const struct macsec_ops *ops; struct macsec_context ctx; ops = macsec_get_ops(macsec, &ctx); if (ops) { ctx.sa.assoc_num = an; ctx.sa.tx_sa = tx_sa; ctx.stats.tx_sa_stats = sum; ctx.secy = &macsec_priv(dev)->secy; macsec_offload(ops->mdo_get_tx_sa_stats, &ctx); } return; } for_each_possible_cpu(cpu) { const struct macsec_tx_sa_stats *stats = per_cpu_ptr(tx_sa->stats, cpu); sum->OutPktsProtected += stats->OutPktsProtected; sum->OutPktsEncrypted += stats->OutPktsEncrypted; } } static int copy_tx_sa_stats(struct sk_buff *skb, struct macsec_tx_sa_stats *sum) { if (nla_put_u32(skb, MACSEC_SA_STATS_ATTR_OUT_PKTS_PROTECTED, sum->OutPktsProtected) || nla_put_u32(skb, MACSEC_SA_STATS_ATTR_OUT_PKTS_ENCRYPTED, sum->OutPktsEncrypted)) return -EMSGSIZE; return 0; } static void get_rx_sa_stats(struct net_device *dev, struct macsec_rx_sc *rx_sc, int an, struct macsec_rx_sa *rx_sa, struct macsec_rx_sa_stats *sum) { struct macsec_dev *macsec = macsec_priv(dev); int cpu; /* If h/w offloading is available, propagate to the device */ if (macsec_is_offloaded(macsec)) { const struct macsec_ops *ops; struct macsec_context ctx; ops = macsec_get_ops(macsec, &ctx); if (ops) { ctx.sa.assoc_num = an; ctx.sa.rx_sa = rx_sa; ctx.stats.rx_sa_stats = sum; ctx.secy = &macsec_priv(dev)->secy; ctx.rx_sc = rx_sc; macsec_offload(ops->mdo_get_rx_sa_stats, &ctx); } return; } for_each_possible_cpu(cpu) { const struct macsec_rx_sa_stats *stats = per_cpu_ptr(rx_sa->stats, cpu); sum->InPktsOK += stats->InPktsOK; sum->InPktsInvalid += stats->InPktsInvalid; sum->InPktsNotValid += stats->InPktsNotValid; sum->InPktsNotUsingSA += stats->InPktsNotUsingSA; sum->InPktsUnusedSA += stats->InPktsUnusedSA; } } static int copy_rx_sa_stats(struct sk_buff *skb, struct macsec_rx_sa_stats *sum) { if (nla_put_u32(skb, MACSEC_SA_STATS_ATTR_IN_PKTS_OK, sum->InPktsOK) || nla_put_u32(skb, MACSEC_SA_STATS_ATTR_IN_PKTS_INVALID, sum->InPktsInvalid) || nla_put_u32(skb, MACSEC_SA_STATS_ATTR_IN_PKTS_NOT_VALID, sum->InPktsNotValid) || nla_put_u32(skb, MACSEC_SA_STATS_ATTR_IN_PKTS_NOT_USING_SA, sum->InPktsNotUsingSA) || 
nla_put_u32(skb, MACSEC_SA_STATS_ATTR_IN_PKTS_UNUSED_SA, sum->InPktsUnusedSA)) return -EMSGSIZE; return 0; } static void get_rx_sc_stats(struct net_device *dev, struct macsec_rx_sc *rx_sc, struct macsec_rx_sc_stats *sum) { struct macsec_dev *macsec = macsec_priv(dev); int cpu; /* If h/w offloading is available, propagate to the device */ if (macsec_is_offloaded(macsec)) { const struct macsec_ops *ops; struct macsec_context ctx; ops = macsec_get_ops(macsec, &ctx); if (ops) { ctx.stats.rx_sc_stats = sum; ctx.secy = &macsec_priv(dev)->secy; ctx.rx_sc = rx_sc; macsec_offload(ops->mdo_get_rx_sc_stats, &ctx); } return; } for_each_possible_cpu(cpu) { const struct pcpu_rx_sc_stats *stats; struct macsec_rx_sc_stats tmp; unsigned int start; stats = per_cpu_ptr(rx_sc->stats, cpu); do { start = u64_stats_fetch_begin(&stats->syncp); memcpy(&tmp, &stats->stats, sizeof(tmp)); } while (u64_stats_fetch_retry(&stats->syncp, start)); sum->InOctetsValidated += tmp.InOctetsValidated; sum->InOctetsDecrypted += tmp.InOctetsDecrypted; sum->InPktsUnchecked += tmp.InPktsUnchecked; sum->InPktsDelayed += tmp.InPktsDelayed; sum->InPktsOK += tmp.InPktsOK; sum->InPktsInvalid += tmp.InPktsInvalid; sum->InPktsLate += tmp.InPktsLate; sum->InPktsNotValid += tmp.InPktsNotValid; sum->InPktsNotUsingSA += tmp.InPktsNotUsingSA; sum->InPktsUnusedSA += tmp.InPktsUnusedSA; } } static int copy_rx_sc_stats(struct sk_buff *skb, struct macsec_rx_sc_stats *sum) { if (nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_OCTETS_VALIDATED, sum->InOctetsValidated, MACSEC_RXSC_STATS_ATTR_PAD) || nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_OCTETS_DECRYPTED, sum->InOctetsDecrypted, MACSEC_RXSC_STATS_ATTR_PAD) || nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_UNCHECKED, sum->InPktsUnchecked, MACSEC_RXSC_STATS_ATTR_PAD) || nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_DELAYED, sum->InPktsDelayed, MACSEC_RXSC_STATS_ATTR_PAD) || nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_OK, sum->InPktsOK, MACSEC_RXSC_STATS_ATTR_PAD) || nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_INVALID, sum->InPktsInvalid, MACSEC_RXSC_STATS_ATTR_PAD) || nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_LATE, sum->InPktsLate, MACSEC_RXSC_STATS_ATTR_PAD) || nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_NOT_VALID, sum->InPktsNotValid, MACSEC_RXSC_STATS_ATTR_PAD) || nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_NOT_USING_SA, sum->InPktsNotUsingSA, MACSEC_RXSC_STATS_ATTR_PAD) || nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_UNUSED_SA, sum->InPktsUnusedSA, MACSEC_RXSC_STATS_ATTR_PAD)) return -EMSGSIZE; return 0; } static void get_tx_sc_stats(struct net_device *dev, struct macsec_tx_sc_stats *sum) { struct macsec_dev *macsec = macsec_priv(dev); int cpu; /* If h/w offloading is available, propagate to the device */ if (macsec_is_offloaded(macsec)) { const struct macsec_ops *ops; struct macsec_context ctx; ops = macsec_get_ops(macsec, &ctx); if (ops) { ctx.stats.tx_sc_stats = sum; ctx.secy = &macsec_priv(dev)->secy; macsec_offload(ops->mdo_get_tx_sc_stats, &ctx); } return; } for_each_possible_cpu(cpu) { const struct pcpu_tx_sc_stats *stats; struct macsec_tx_sc_stats tmp; unsigned int start; stats = per_cpu_ptr(macsec_priv(dev)->secy.tx_sc.stats, cpu); do { start = u64_stats_fetch_begin(&stats->syncp); memcpy(&tmp, &stats->stats, sizeof(tmp)); } while (u64_stats_fetch_retry(&stats->syncp, start)); sum->OutPktsProtected += tmp.OutPktsProtected; sum->OutPktsEncrypted += tmp.OutPktsEncrypted; sum->OutOctetsProtected += 
tmp.OutOctetsProtected; sum->OutOctetsEncrypted += tmp.OutOctetsEncrypted; } } static int copy_tx_sc_stats(struct sk_buff *skb, struct macsec_tx_sc_stats *sum) { if (nla_put_u64_64bit(skb, MACSEC_TXSC_STATS_ATTR_OUT_PKTS_PROTECTED, sum->OutPktsProtected, MACSEC_TXSC_STATS_ATTR_PAD) || nla_put_u64_64bit(skb, MACSEC_TXSC_STATS_ATTR_OUT_PKTS_ENCRYPTED, sum->OutPktsEncrypted, MACSEC_TXSC_STATS_ATTR_PAD) || nla_put_u64_64bit(skb, MACSEC_TXSC_STATS_ATTR_OUT_OCTETS_PROTECTED, sum->OutOctetsProtected, MACSEC_TXSC_STATS_ATTR_PAD) || nla_put_u64_64bit(skb, MACSEC_TXSC_STATS_ATTR_OUT_OCTETS_ENCRYPTED, sum->OutOctetsEncrypted, MACSEC_TXSC_STATS_ATTR_PAD)) return -EMSGSIZE; return 0; } static void get_secy_stats(struct net_device *dev, struct macsec_dev_stats *sum) { struct macsec_dev *macsec = macsec_priv(dev); int cpu; /* If h/w offloading is available, propagate to the device */ if (macsec_is_offloaded(macsec)) { const struct macsec_ops *ops; struct macsec_context ctx; ops = macsec_get_ops(macsec, &ctx); if (ops) { ctx.stats.dev_stats = sum; ctx.secy = &macsec_priv(dev)->secy; macsec_offload(ops->mdo_get_dev_stats, &ctx); } return; } for_each_possible_cpu(cpu) { const struct pcpu_secy_stats *stats; struct macsec_dev_stats tmp; unsigned int start; stats = per_cpu_ptr(macsec_priv(dev)->stats, cpu); do { start = u64_stats_fetch_begin(&stats->syncp); memcpy(&tmp, &stats->stats, sizeof(tmp)); } while (u64_stats_fetch_retry(&stats->syncp, start)); sum->OutPktsUntagged += tmp.OutPktsUntagged; sum->InPktsUntagged += tmp.InPktsUntagged; sum->OutPktsTooLong += tmp.OutPktsTooLong; sum->InPktsNoTag += tmp.InPktsNoTag; sum->InPktsBadTag += tmp.InPktsBadTag; sum->InPktsUnknownSCI += tmp.InPktsUnknownSCI; sum->InPktsNoSCI += tmp.InPktsNoSCI; sum->InPktsOverrun += tmp.InPktsOverrun; } } static int copy_secy_stats(struct sk_buff *skb, struct macsec_dev_stats *sum) { if (nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_OUT_PKTS_UNTAGGED, sum->OutPktsUntagged, MACSEC_SECY_STATS_ATTR_PAD) || nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_IN_PKTS_UNTAGGED, sum->InPktsUntagged, MACSEC_SECY_STATS_ATTR_PAD) || nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_OUT_PKTS_TOO_LONG, sum->OutPktsTooLong, MACSEC_SECY_STATS_ATTR_PAD) || nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_IN_PKTS_NO_TAG, sum->InPktsNoTag, MACSEC_SECY_STATS_ATTR_PAD) || nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_IN_PKTS_BAD_TAG, sum->InPktsBadTag, MACSEC_SECY_STATS_ATTR_PAD) || nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_IN_PKTS_UNKNOWN_SCI, sum->InPktsUnknownSCI, MACSEC_SECY_STATS_ATTR_PAD) || nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_IN_PKTS_NO_SCI, sum->InPktsNoSCI, MACSEC_SECY_STATS_ATTR_PAD) || nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_IN_PKTS_OVERRUN, sum->InPktsOverrun, MACSEC_SECY_STATS_ATTR_PAD)) return -EMSGSIZE; return 0; } static int nla_put_secy(struct macsec_secy *secy, struct sk_buff *skb) { struct macsec_tx_sc *tx_sc = &secy->tx_sc; struct nlattr *secy_nest = nla_nest_start_noflag(skb, MACSEC_ATTR_SECY); u64 csid; if (!secy_nest) return 1; switch (secy->key_len) { case MACSEC_GCM_AES_128_SAK_LEN: csid = secy->xpn ? MACSEC_CIPHER_ID_GCM_AES_XPN_128 : MACSEC_DEFAULT_CIPHER_ID; break; case MACSEC_GCM_AES_256_SAK_LEN: csid = secy->xpn ? 
MACSEC_CIPHER_ID_GCM_AES_XPN_256 : MACSEC_CIPHER_ID_GCM_AES_256; break; default: goto cancel; } if (nla_put_sci(skb, MACSEC_SECY_ATTR_SCI, secy->sci, MACSEC_SECY_ATTR_PAD) || nla_put_u64_64bit(skb, MACSEC_SECY_ATTR_CIPHER_SUITE, csid, MACSEC_SECY_ATTR_PAD) || nla_put_u8(skb, MACSEC_SECY_ATTR_ICV_LEN, secy->icv_len) || nla_put_u8(skb, MACSEC_SECY_ATTR_OPER, secy->operational) || nla_put_u8(skb, MACSEC_SECY_ATTR_PROTECT, secy->protect_frames) || nla_put_u8(skb, MACSEC_SECY_ATTR_REPLAY, secy->replay_protect) || nla_put_u8(skb, MACSEC_SECY_ATTR_VALIDATE, secy->validate_frames) || nla_put_u8(skb, MACSEC_SECY_ATTR_ENCRYPT, tx_sc->encrypt) || nla_put_u8(skb, MACSEC_SECY_ATTR_INC_SCI, tx_sc->send_sci) || nla_put_u8(skb, MACSEC_SECY_ATTR_ES, tx_sc->end_station) || nla_put_u8(skb, MACSEC_SECY_ATTR_SCB, tx_sc->scb) || nla_put_u8(skb, MACSEC_SECY_ATTR_ENCODING_SA, tx_sc->encoding_sa)) goto cancel; if (secy->replay_protect) { if (nla_put_u32(skb, MACSEC_SECY_ATTR_WINDOW, secy->replay_window)) goto cancel; } nla_nest_end(skb, secy_nest); return 0; cancel: nla_nest_cancel(skb, secy_nest); return 1; } static noinline_for_stack int dump_secy(struct macsec_secy *secy, struct net_device *dev, struct sk_buff *skb, struct netlink_callback *cb) { struct macsec_tx_sc_stats tx_sc_stats = {0, }; struct macsec_tx_sa_stats tx_sa_stats = {0, }; struct macsec_rx_sc_stats rx_sc_stats = {0, }; struct macsec_rx_sa_stats rx_sa_stats = {0, }; struct macsec_dev *macsec = netdev_priv(dev); struct macsec_dev_stats dev_stats = {0, }; struct macsec_tx_sc *tx_sc = &secy->tx_sc; struct nlattr *txsa_list, *rxsc_list; struct macsec_rx_sc *rx_sc; struct nlattr *attr; void *hdr; int i, j; hdr = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq, &macsec_fam, NLM_F_MULTI, MACSEC_CMD_GET_TXSC); if (!hdr) return -EMSGSIZE; genl_dump_check_consistent(cb, hdr); if (nla_put_u32(skb, MACSEC_ATTR_IFINDEX, dev->ifindex)) goto nla_put_failure; attr = nla_nest_start_noflag(skb, MACSEC_ATTR_OFFLOAD); if (!attr) goto nla_put_failure; if (nla_put_u8(skb, MACSEC_OFFLOAD_ATTR_TYPE, macsec->offload)) goto nla_put_failure; nla_nest_end(skb, attr); if (nla_put_secy(secy, skb)) goto nla_put_failure; attr = nla_nest_start_noflag(skb, MACSEC_ATTR_TXSC_STATS); if (!attr) goto nla_put_failure; get_tx_sc_stats(dev, &tx_sc_stats); if (copy_tx_sc_stats(skb, &tx_sc_stats)) { nla_nest_cancel(skb, attr); goto nla_put_failure; } nla_nest_end(skb, attr); attr = nla_nest_start_noflag(skb, MACSEC_ATTR_SECY_STATS); if (!attr) goto nla_put_failure; get_secy_stats(dev, &dev_stats); if (copy_secy_stats(skb, &dev_stats)) { nla_nest_cancel(skb, attr); goto nla_put_failure; } nla_nest_end(skb, attr); txsa_list = nla_nest_start_noflag(skb, MACSEC_ATTR_TXSA_LIST); if (!txsa_list) goto nla_put_failure; for (i = 0, j = 1; i < MACSEC_NUM_AN; i++) { struct macsec_tx_sa *tx_sa = rtnl_dereference(tx_sc->sa[i]); struct nlattr *txsa_nest; u64 pn; int pn_len; if (!tx_sa) continue; txsa_nest = nla_nest_start_noflag(skb, j++); if (!txsa_nest) { nla_nest_cancel(skb, txsa_list); goto nla_put_failure; } attr = nla_nest_start_noflag(skb, MACSEC_SA_ATTR_STATS); if (!attr) { nla_nest_cancel(skb, txsa_nest); nla_nest_cancel(skb, txsa_list); goto nla_put_failure; } memset(&tx_sa_stats, 0, sizeof(tx_sa_stats)); get_tx_sa_stats(dev, i, tx_sa, &tx_sa_stats); if (copy_tx_sa_stats(skb, &tx_sa_stats)) { nla_nest_cancel(skb, attr); nla_nest_cancel(skb, txsa_nest); nla_nest_cancel(skb, txsa_list); goto nla_put_failure; } nla_nest_end(skb, attr); if (secy->xpn) { pn = tx_sa->next_pn; pn_len 
= MACSEC_XPN_PN_LEN; } else { pn = tx_sa->next_pn_halves.lower; pn_len = MACSEC_DEFAULT_PN_LEN; } if (nla_put_u8(skb, MACSEC_SA_ATTR_AN, i) || nla_put(skb, MACSEC_SA_ATTR_PN, pn_len, &pn) || nla_put(skb, MACSEC_SA_ATTR_KEYID, MACSEC_KEYID_LEN, tx_sa->key.id) || (secy->xpn && nla_put_ssci(skb, MACSEC_SA_ATTR_SSCI, tx_sa->ssci)) || nla_put_u8(skb, MACSEC_SA_ATTR_ACTIVE, tx_sa->active)) { nla_nest_cancel(skb, txsa_nest); nla_nest_cancel(skb, txsa_list); goto nla_put_failure; } nla_nest_end(skb, txsa_nest); } nla_nest_end(skb, txsa_list); rxsc_list = nla_nest_start_noflag(skb, MACSEC_ATTR_RXSC_LIST); if (!rxsc_list) goto nla_put_failure; j = 1; for_each_rxsc_rtnl(secy, rx_sc) { int k; struct nlattr *rxsa_list; struct nlattr *rxsc_nest = nla_nest_start_noflag(skb, j++); if (!rxsc_nest) { nla_nest_cancel(skb, rxsc_list); goto nla_put_failure; } if (nla_put_u8(skb, MACSEC_RXSC_ATTR_ACTIVE, rx_sc->active) || nla_put_sci(skb, MACSEC_RXSC_ATTR_SCI, rx_sc->sci, MACSEC_RXSC_ATTR_PAD)) { nla_nest_cancel(skb, rxsc_nest); nla_nest_cancel(skb, rxsc_list); goto nla_put_failure; } attr = nla_nest_start_noflag(skb, MACSEC_RXSC_ATTR_STATS); if (!attr) { nla_nest_cancel(skb, rxsc_nest); nla_nest_cancel(skb, rxsc_list); goto nla_put_failure; } memset(&rx_sc_stats, 0, sizeof(rx_sc_stats)); get_rx_sc_stats(dev, rx_sc, &rx_sc_stats); if (copy_rx_sc_stats(skb, &rx_sc_stats)) { nla_nest_cancel(skb, attr); nla_nest_cancel(skb, rxsc_nest); nla_nest_cancel(skb, rxsc_list); goto nla_put_failure; } nla_nest_end(skb, attr); rxsa_list = nla_nest_start_noflag(skb, MACSEC_RXSC_ATTR_SA_LIST); if (!rxsa_list) { nla_nest_cancel(skb, rxsc_nest); nla_nest_cancel(skb, rxsc_list); goto nla_put_failure; } for (i = 0, k = 1; i < MACSEC_NUM_AN; i++) { struct macsec_rx_sa *rx_sa = rtnl_dereference(rx_sc->sa[i]); struct nlattr *rxsa_nest; u64 pn; int pn_len; if (!rx_sa) continue; rxsa_nest = nla_nest_start_noflag(skb, k++); if (!rxsa_nest) { nla_nest_cancel(skb, rxsa_list); nla_nest_cancel(skb, rxsc_nest); nla_nest_cancel(skb, rxsc_list); goto nla_put_failure; } attr = nla_nest_start_noflag(skb, MACSEC_SA_ATTR_STATS); if (!attr) { nla_nest_cancel(skb, rxsa_list); nla_nest_cancel(skb, rxsc_nest); nla_nest_cancel(skb, rxsc_list); goto nla_put_failure; } memset(&rx_sa_stats, 0, sizeof(rx_sa_stats)); get_rx_sa_stats(dev, rx_sc, i, rx_sa, &rx_sa_stats); if (copy_rx_sa_stats(skb, &rx_sa_stats)) { nla_nest_cancel(skb, attr); nla_nest_cancel(skb, rxsa_list); nla_nest_cancel(skb, rxsc_nest); nla_nest_cancel(skb, rxsc_list); goto nla_put_failure; } nla_nest_end(skb, attr); if (secy->xpn) { pn = rx_sa->next_pn; pn_len = MACSEC_XPN_PN_LEN; } else { pn = rx_sa->next_pn_halves.lower; pn_len = MACSEC_DEFAULT_PN_LEN; } if (nla_put_u8(skb, MACSEC_SA_ATTR_AN, i) || nla_put(skb, MACSEC_SA_ATTR_PN, pn_len, &pn) || nla_put(skb, MACSEC_SA_ATTR_KEYID, MACSEC_KEYID_LEN, rx_sa->key.id) || (secy->xpn && nla_put_ssci(skb, MACSEC_SA_ATTR_SSCI, rx_sa->ssci)) || nla_put_u8(skb, MACSEC_SA_ATTR_ACTIVE, rx_sa->active)) { nla_nest_cancel(skb, rxsa_nest); nla_nest_cancel(skb, rxsc_nest); nla_nest_cancel(skb, rxsc_list); goto nla_put_failure; } nla_nest_end(skb, rxsa_nest); } nla_nest_end(skb, rxsa_list); nla_nest_end(skb, rxsc_nest); } nla_nest_end(skb, rxsc_list); genlmsg_end(skb, hdr); return 0; nla_put_failure: genlmsg_cancel(skb, hdr); return -EMSGSIZE; } static int macsec_generation = 1; /* protected by RTNL */ static int macsec_dump_txsc(struct sk_buff *skb, struct netlink_callback *cb) { struct net *net = sock_net(skb->sk); struct net_device *dev; int dev_idx, d; 
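	/* cb->args[0] records how far the previous dump pass walked through
	 * the net_device list, so the loop below skips that many entries
	 * before resuming.
	 */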
dev_idx = cb->args[0]; d = 0; rtnl_lock(); cb->seq = macsec_generation; for_each_netdev(net, dev) { struct macsec_secy *secy; if (d < dev_idx) goto next; if (!netif_is_macsec(dev)) goto next; secy = &macsec_priv(dev)->secy; if (dump_secy(secy, dev, skb, cb) < 0) goto done; next: d++; } done: rtnl_unlock(); cb->args[0] = d; return skb->len; } static const struct genl_small_ops macsec_genl_ops[] = { { .cmd = MACSEC_CMD_GET_TXSC, .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, .dumpit = macsec_dump_txsc, }, { .cmd = MACSEC_CMD_ADD_RXSC, .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, .doit = macsec_add_rxsc, .flags = GENL_ADMIN_PERM, }, { .cmd = MACSEC_CMD_DEL_RXSC, .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, .doit = macsec_del_rxsc, .flags = GENL_ADMIN_PERM, }, { .cmd = MACSEC_CMD_UPD_RXSC, .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, .doit = macsec_upd_rxsc, .flags = GENL_ADMIN_PERM, }, { .cmd = MACSEC_CMD_ADD_TXSA, .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, .doit = macsec_add_txsa, .flags = GENL_ADMIN_PERM, }, { .cmd = MACSEC_CMD_DEL_TXSA, .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, .doit = macsec_del_txsa, .flags = GENL_ADMIN_PERM, }, { .cmd = MACSEC_CMD_UPD_TXSA, .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, .doit = macsec_upd_txsa, .flags = GENL_ADMIN_PERM, }, { .cmd = MACSEC_CMD_ADD_RXSA, .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, .doit = macsec_add_rxsa, .flags = GENL_ADMIN_PERM, }, { .cmd = MACSEC_CMD_DEL_RXSA, .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, .doit = macsec_del_rxsa, .flags = GENL_ADMIN_PERM, }, { .cmd = MACSEC_CMD_UPD_RXSA, .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, .doit = macsec_upd_rxsa, .flags = GENL_ADMIN_PERM, }, { .cmd = MACSEC_CMD_UPD_OFFLOAD, .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, .doit = macsec_upd_offload, .flags = GENL_ADMIN_PERM, }, }; static struct genl_family macsec_fam __ro_after_init = { .name = MACSEC_GENL_NAME, .hdrsize = 0, .version = MACSEC_GENL_VERSION, .maxattr = MACSEC_ATTR_MAX, .policy = macsec_genl_policy, .netnsok = true, .module = THIS_MODULE, .small_ops = macsec_genl_ops, .n_small_ops = ARRAY_SIZE(macsec_genl_ops), .resv_start_op = MACSEC_CMD_UPD_OFFLOAD + 1, }; static netdev_tx_t macsec_start_xmit(struct sk_buff *skb, struct net_device *dev) { struct macsec_dev *macsec = netdev_priv(dev); struct macsec_secy *secy = &macsec->secy; struct pcpu_secy_stats *secy_stats; int ret, len; if (macsec_is_offloaded(netdev_priv(dev))) { struct metadata_dst *md_dst = secy->tx_sc.md_dst; skb_dst_drop(skb); dst_hold(&md_dst->dst); skb_dst_set(skb, &md_dst->dst); skb->dev = macsec->real_dev; return dev_queue_xmit(skb); } /* 10.5 */ if (!secy->protect_frames) { secy_stats = this_cpu_ptr(macsec->stats); u64_stats_update_begin(&secy_stats->syncp); secy_stats->stats.OutPktsUntagged++; u64_stats_update_end(&secy_stats->syncp); skb->dev = macsec->real_dev; len = skb->len; ret = dev_queue_xmit(skb); count_tx(dev, ret, len); return ret; } if (!secy->operational) { kfree_skb(skb); DEV_STATS_INC(dev, tx_dropped); return NETDEV_TX_OK; } len = skb->len; skb = macsec_encrypt(skb, dev); if (IS_ERR(skb)) { if (PTR_ERR(skb) != -EINPROGRESS) DEV_STATS_INC(dev, tx_dropped); return NETDEV_TX_OK; } macsec_count_tx(skb, &macsec->secy.tx_sc, macsec_skb_cb(skb)->tx_sa); macsec_encrypt_finish(skb, dev); ret = dev_queue_xmit(skb); count_tx(dev, ret, len); 
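	/* Software datapath: the skb has been encrypted and handed to the
	 * underlying device, and the transmit stats updated above; pass
	 * dev_queue_xmit()'s verdict back to the stack.
	 */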
return ret; } #define MACSEC_FEATURES \ (NETIF_F_SG | NETIF_F_HIGHDMA | NETIF_F_FRAGLIST) static int macsec_dev_init(struct net_device *dev) { struct macsec_dev *macsec = macsec_priv(dev); struct net_device *real_dev = macsec->real_dev; int err; dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats); if (!dev->tstats) return -ENOMEM; err = gro_cells_init(&macsec->gro_cells, dev); if (err) { free_percpu(dev->tstats); return err; } dev->features = real_dev->features & MACSEC_FEATURES; dev->features |= NETIF_F_LLTX | NETIF_F_GSO_SOFTWARE; dev->needed_headroom = real_dev->needed_headroom + MACSEC_NEEDED_HEADROOM; dev->needed_tailroom = real_dev->needed_tailroom + MACSEC_NEEDED_TAILROOM; if (is_zero_ether_addr(dev->dev_addr)) eth_hw_addr_inherit(dev, real_dev); if (is_zero_ether_addr(dev->broadcast)) memcpy(dev->broadcast, real_dev->broadcast, dev->addr_len); /* Get macsec's reference to real_dev */ netdev_hold(real_dev, &macsec->dev_tracker, GFP_KERNEL); return 0; } static void macsec_dev_uninit(struct net_device *dev) { struct macsec_dev *macsec = macsec_priv(dev); gro_cells_destroy(&macsec->gro_cells); free_percpu(dev->tstats); } static netdev_features_t macsec_fix_features(struct net_device *dev, netdev_features_t features) { struct macsec_dev *macsec = macsec_priv(dev); struct net_device *real_dev = macsec->real_dev; features &= (real_dev->features & MACSEC_FEATURES) | NETIF_F_GSO_SOFTWARE | NETIF_F_SOFT_FEATURES; features |= NETIF_F_LLTX; return features; } static int macsec_dev_open(struct net_device *dev) { struct macsec_dev *macsec = macsec_priv(dev); struct net_device *real_dev = macsec->real_dev; int err; err = dev_uc_add(real_dev, dev->dev_addr); if (err < 0) return err; if (dev->flags & IFF_ALLMULTI) { err = dev_set_allmulti(real_dev, 1); if (err < 0) goto del_unicast; } if (dev->flags & IFF_PROMISC) { err = dev_set_promiscuity(real_dev, 1); if (err < 0) goto clear_allmulti; } /* If h/w offloading is available, propagate to the device */ if (macsec_is_offloaded(macsec)) { const struct macsec_ops *ops; struct macsec_context ctx; ops = macsec_get_ops(netdev_priv(dev), &ctx); if (!ops) { err = -EOPNOTSUPP; goto clear_allmulti; } ctx.secy = &macsec->secy; err = macsec_offload(ops->mdo_dev_open, &ctx); if (err) goto clear_allmulti; } if (netif_carrier_ok(real_dev)) netif_carrier_on(dev); return 0; clear_allmulti: if (dev->flags & IFF_ALLMULTI) dev_set_allmulti(real_dev, -1); del_unicast: dev_uc_del(real_dev, dev->dev_addr); netif_carrier_off(dev); return err; } static int macsec_dev_stop(struct net_device *dev) { struct macsec_dev *macsec = macsec_priv(dev); struct net_device *real_dev = macsec->real_dev; netif_carrier_off(dev); /* If h/w offloading is available, propagate to the device */ if (macsec_is_offloaded(macsec)) { const struct macsec_ops *ops; struct macsec_context ctx; ops = macsec_get_ops(macsec, &ctx); if (ops) { ctx.secy = &macsec->secy; macsec_offload(ops->mdo_dev_stop, &ctx); } } dev_mc_unsync(real_dev, dev); dev_uc_unsync(real_dev, dev); if (dev->flags & IFF_ALLMULTI) dev_set_allmulti(real_dev, -1); if (dev->flags & IFF_PROMISC) dev_set_promiscuity(real_dev, -1); dev_uc_del(real_dev, dev->dev_addr); return 0; } static void macsec_dev_change_rx_flags(struct net_device *dev, int change) { struct net_device *real_dev = macsec_priv(dev)->real_dev; if (!(dev->flags & IFF_UP)) return; if (change & IFF_ALLMULTI) dev_set_allmulti(real_dev, dev->flags & IFF_ALLMULTI ? 1 : -1); if (change & IFF_PROMISC) dev_set_promiscuity(real_dev, dev->flags & IFF_PROMISC ? 
1 : -1); } static void macsec_dev_set_rx_mode(struct net_device *dev) { struct net_device *real_dev = macsec_priv(dev)->real_dev; dev_mc_sync(real_dev, dev); dev_uc_sync(real_dev, dev); } static int macsec_set_mac_address(struct net_device *dev, void *p) { struct macsec_dev *macsec = macsec_priv(dev); struct net_device *real_dev = macsec->real_dev; struct sockaddr *addr = p; int err; if (!is_valid_ether_addr(addr->sa_data)) return -EADDRNOTAVAIL; if (!(dev->flags & IFF_UP)) goto out; err = dev_uc_add(real_dev, addr->sa_data); if (err < 0) return err; dev_uc_del(real_dev, dev->dev_addr); out: eth_hw_addr_set(dev, addr->sa_data); /* If h/w offloading is available, propagate to the device */ if (macsec_is_offloaded(macsec)) { const struct macsec_ops *ops; struct macsec_context ctx; ops = macsec_get_ops(macsec, &ctx); if (ops) { ctx.secy = &macsec->secy; macsec_offload(ops->mdo_upd_secy, &ctx); } } return 0; } static int macsec_change_mtu(struct net_device *dev, int new_mtu) { struct macsec_dev *macsec = macsec_priv(dev); unsigned int extra = macsec->secy.icv_len + macsec_extra_len(true); if (macsec->real_dev->mtu - extra < new_mtu) return -ERANGE; dev->mtu = new_mtu; return 0; } static void macsec_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *s) { if (!dev->tstats) return; dev_fetch_sw_netstats(s, dev->tstats); s->rx_dropped = atomic_long_read(&dev->stats.__rx_dropped); s->tx_dropped = atomic_long_read(&dev->stats.__tx_dropped); s->rx_errors = atomic_long_read(&dev->stats.__rx_errors); } static int macsec_get_iflink(const struct net_device *dev) { return macsec_priv(dev)->real_dev->ifindex; } static const struct net_device_ops macsec_netdev_ops = { .ndo_init = macsec_dev_init, .ndo_uninit = macsec_dev_uninit, .ndo_open = macsec_dev_open, .ndo_stop = macsec_dev_stop, .ndo_fix_features = macsec_fix_features, .ndo_change_mtu = macsec_change_mtu, .ndo_set_rx_mode = macsec_dev_set_rx_mode, .ndo_change_rx_flags = macsec_dev_change_rx_flags, .ndo_set_mac_address = macsec_set_mac_address, .ndo_start_xmit = macsec_start_xmit, .ndo_get_stats64 = macsec_get_stats64, .ndo_get_iflink = macsec_get_iflink, }; static const struct device_type macsec_type = { .name = "macsec", }; static const struct nla_policy macsec_rtnl_policy[IFLA_MACSEC_MAX + 1] = { [IFLA_MACSEC_SCI] = { .type = NLA_U64 }, [IFLA_MACSEC_PORT] = { .type = NLA_U16 }, [IFLA_MACSEC_ICV_LEN] = { .type = NLA_U8 }, [IFLA_MACSEC_CIPHER_SUITE] = { .type = NLA_U64 }, [IFLA_MACSEC_WINDOW] = { .type = NLA_U32 }, [IFLA_MACSEC_ENCODING_SA] = { .type = NLA_U8 }, [IFLA_MACSEC_ENCRYPT] = { .type = NLA_U8 }, [IFLA_MACSEC_PROTECT] = { .type = NLA_U8 }, [IFLA_MACSEC_INC_SCI] = { .type = NLA_U8 }, [IFLA_MACSEC_ES] = { .type = NLA_U8 }, [IFLA_MACSEC_SCB] = { .type = NLA_U8 }, [IFLA_MACSEC_REPLAY_PROTECT] = { .type = NLA_U8 }, [IFLA_MACSEC_VALIDATION] = { .type = NLA_U8 }, [IFLA_MACSEC_OFFLOAD] = { .type = NLA_U8 }, }; static void macsec_free_netdev(struct net_device *dev) { struct macsec_dev *macsec = macsec_priv(dev); if (macsec->secy.tx_sc.md_dst) metadata_dst_free(macsec->secy.tx_sc.md_dst); free_percpu(macsec->stats); free_percpu(macsec->secy.tx_sc.stats); /* Get rid of the macsec's reference to real_dev */ netdev_put(macsec->real_dev, &macsec->dev_tracker); } static void macsec_setup(struct net_device *dev) { ether_setup(dev); dev->min_mtu = 0; dev->max_mtu = ETH_MAX_MTU; dev->priv_flags |= IFF_NO_QUEUE; dev->netdev_ops = &macsec_netdev_ops; dev->needs_free_netdev = true; dev->priv_destructor = macsec_free_netdev; SET_NETDEV_DEVTYPE(dev, 
&macsec_type); eth_zero_addr(dev->broadcast); } static int macsec_changelink_common(struct net_device *dev, struct nlattr *data[]) { struct macsec_secy *secy; struct macsec_tx_sc *tx_sc; secy = &macsec_priv(dev)->secy; tx_sc = &secy->tx_sc; if (data[IFLA_MACSEC_ENCODING_SA]) { struct macsec_tx_sa *tx_sa; tx_sc->encoding_sa = nla_get_u8(data[IFLA_MACSEC_ENCODING_SA]); tx_sa = rtnl_dereference(tx_sc->sa[tx_sc->encoding_sa]); secy->operational = tx_sa && tx_sa->active; } if (data[IFLA_MACSEC_ENCRYPT]) tx_sc->encrypt = !!nla_get_u8(data[IFLA_MACSEC_ENCRYPT]); if (data[IFLA_MACSEC_PROTECT]) secy->protect_frames = !!nla_get_u8(data[IFLA_MACSEC_PROTECT]); if (data[IFLA_MACSEC_INC_SCI]) tx_sc->send_sci = !!nla_get_u8(data[IFLA_MACSEC_INC_SCI]); if (data[IFLA_MACSEC_ES]) tx_sc->end_station = !!nla_get_u8(data[IFLA_MACSEC_ES]); if (data[IFLA_MACSEC_SCB]) tx_sc->scb = !!nla_get_u8(data[IFLA_MACSEC_SCB]); if (data[IFLA_MACSEC_REPLAY_PROTECT]) secy->replay_protect = !!nla_get_u8(data[IFLA_MACSEC_REPLAY_PROTECT]); if (data[IFLA_MACSEC_VALIDATION]) secy->validate_frames = nla_get_u8(data[IFLA_MACSEC_VALIDATION]); if (data[IFLA_MACSEC_CIPHER_SUITE]) { switch (nla_get_u64(data[IFLA_MACSEC_CIPHER_SUITE])) { case MACSEC_CIPHER_ID_GCM_AES_128: case MACSEC_DEFAULT_CIPHER_ID: secy->key_len = MACSEC_GCM_AES_128_SAK_LEN; secy->xpn = false; break; case MACSEC_CIPHER_ID_GCM_AES_256: secy->key_len = MACSEC_GCM_AES_256_SAK_LEN; secy->xpn = false; break; case MACSEC_CIPHER_ID_GCM_AES_XPN_128: secy->key_len = MACSEC_GCM_AES_128_SAK_LEN; secy->xpn = true; break; case MACSEC_CIPHER_ID_GCM_AES_XPN_256: secy->key_len = MACSEC_GCM_AES_256_SAK_LEN; secy->xpn = true; break; default: return -EINVAL; } } if (data[IFLA_MACSEC_WINDOW]) { secy->replay_window = nla_get_u32(data[IFLA_MACSEC_WINDOW]); /* IEEE 802.1AEbw-2013 10.7.8 - maximum replay window * for XPN cipher suites */ if (secy->xpn && secy->replay_window > MACSEC_XPN_MAX_REPLAY_WINDOW) return -EINVAL; } return 0; } static int macsec_changelink(struct net_device *dev, struct nlattr *tb[], struct nlattr *data[], struct netlink_ext_ack *extack) { struct macsec_dev *macsec = macsec_priv(dev); bool macsec_offload_state_change = false; enum macsec_offload offload; struct macsec_tx_sc tx_sc; struct macsec_secy secy; int ret; if (!data) return 0; if (data[IFLA_MACSEC_CIPHER_SUITE] || data[IFLA_MACSEC_ICV_LEN] || data[IFLA_MACSEC_SCI] || data[IFLA_MACSEC_PORT]) return -EINVAL; /* Keep a copy of unmodified secy and tx_sc, in case the offload * propagation fails, to revert macsec_changelink_common. 
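	 * Both structures are copied wholesale here so that the memcpy()s in
	 * the cleanup path below can restore the previous configuration
	 * verbatim.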
*/ memcpy(&secy, &macsec->secy, sizeof(secy)); memcpy(&tx_sc, &macsec->secy.tx_sc, sizeof(tx_sc)); ret = macsec_changelink_common(dev, data); if (ret) goto cleanup; if (data[IFLA_MACSEC_OFFLOAD]) { offload = nla_get_u8(data[IFLA_MACSEC_OFFLOAD]); if (macsec->offload != offload) { macsec_offload_state_change = true; ret = macsec_update_offload(dev, offload); if (ret) goto cleanup; } } /* If h/w offloading is available, propagate to the device */ if (!macsec_offload_state_change && macsec_is_offloaded(macsec)) { const struct macsec_ops *ops; struct macsec_context ctx; ops = macsec_get_ops(netdev_priv(dev), &ctx); if (!ops) { ret = -EOPNOTSUPP; goto cleanup; } ctx.secy = &macsec->secy; ret = macsec_offload(ops->mdo_upd_secy, &ctx); if (ret) goto cleanup; } return 0; cleanup: memcpy(&macsec->secy.tx_sc, &tx_sc, sizeof(tx_sc)); memcpy(&macsec->secy, &secy, sizeof(secy)); return ret; } static void macsec_del_dev(struct macsec_dev *macsec) { int i; while (macsec->secy.rx_sc) { struct macsec_rx_sc *rx_sc = rtnl_dereference(macsec->secy.rx_sc); rcu_assign_pointer(macsec->secy.rx_sc, rx_sc->next); free_rx_sc(rx_sc); } for (i = 0; i < MACSEC_NUM_AN; i++) { struct macsec_tx_sa *sa = rtnl_dereference(macsec->secy.tx_sc.sa[i]); if (sa) { RCU_INIT_POINTER(macsec->secy.tx_sc.sa[i], NULL); clear_tx_sa(sa); } } } static void macsec_common_dellink(struct net_device *dev, struct list_head *head) { struct macsec_dev *macsec = macsec_priv(dev); struct net_device *real_dev = macsec->real_dev; /* If h/w offloading is available, propagate to the device */ if (macsec_is_offloaded(macsec)) { const struct macsec_ops *ops; struct macsec_context ctx; ops = macsec_get_ops(netdev_priv(dev), &ctx); if (ops) { ctx.secy = &macsec->secy; macsec_offload(ops->mdo_del_secy, &ctx); } } unregister_netdevice_queue(dev, head); list_del_rcu(&macsec->secys); macsec_del_dev(macsec); netdev_upper_dev_unlink(real_dev, dev); macsec_generation++; } static void macsec_dellink(struct net_device *dev, struct list_head *head) { struct macsec_dev *macsec = macsec_priv(dev); struct net_device *real_dev = macsec->real_dev; struct macsec_rxh_data *rxd = macsec_data_rtnl(real_dev); macsec_common_dellink(dev, head); if (list_empty(&rxd->secys)) { netdev_rx_handler_unregister(real_dev); kfree(rxd); } } static int register_macsec_dev(struct net_device *real_dev, struct net_device *dev) { struct macsec_dev *macsec = macsec_priv(dev); struct macsec_rxh_data *rxd = macsec_data_rtnl(real_dev); if (!rxd) { int err; rxd = kmalloc(sizeof(*rxd), GFP_KERNEL); if (!rxd) return -ENOMEM; INIT_LIST_HEAD(&rxd->secys); err = netdev_rx_handler_register(real_dev, macsec_handle_frame, rxd); if (err < 0) { kfree(rxd); return err; } } list_add_tail_rcu(&macsec->secys, &rxd->secys); return 0; } static bool sci_exists(struct net_device *dev, sci_t sci) { struct macsec_rxh_data *rxd = macsec_data_rtnl(dev); struct macsec_dev *macsec; list_for_each_entry(macsec, &rxd->secys, secys) { if (macsec->secy.sci == sci) return true; } return false; } static sci_t dev_to_sci(struct net_device *dev, __be16 port) { return make_sci(dev->dev_addr, port); } static int macsec_add_dev(struct net_device *dev, sci_t sci, u8 icv_len) { struct macsec_dev *macsec = macsec_priv(dev); struct macsec_secy *secy = &macsec->secy; macsec->stats = netdev_alloc_pcpu_stats(struct pcpu_secy_stats); if (!macsec->stats) return -ENOMEM; secy->tx_sc.stats = netdev_alloc_pcpu_stats(struct pcpu_tx_sc_stats); if (!secy->tx_sc.stats) return -ENOMEM; secy->tx_sc.md_dst = metadata_dst_alloc(0, METADATA_MACSEC, 
GFP_KERNEL); if (!secy->tx_sc.md_dst) /* macsec and secy percpu stats will be freed when unregistering * net_device in macsec_free_netdev() */ return -ENOMEM; if (sci == MACSEC_UNDEF_SCI) sci = dev_to_sci(dev, MACSEC_PORT_ES); secy->netdev = dev; secy->operational = true; secy->key_len = DEFAULT_SAK_LEN; secy->icv_len = icv_len; secy->validate_frames = MACSEC_VALIDATE_DEFAULT; secy->protect_frames = true; secy->replay_protect = false; secy->xpn = DEFAULT_XPN; secy->sci = sci; secy->tx_sc.md_dst->u.macsec_info.sci = sci; secy->tx_sc.active = true; secy->tx_sc.encoding_sa = DEFAULT_ENCODING_SA; secy->tx_sc.encrypt = DEFAULT_ENCRYPT; secy->tx_sc.send_sci = DEFAULT_SEND_SCI; secy->tx_sc.end_station = false; secy->tx_sc.scb = false; return 0; } static struct lock_class_key macsec_netdev_addr_lock_key; static int macsec_newlink(struct net *net, struct net_device *dev, struct nlattr *tb[], struct nlattr *data[], struct netlink_ext_ack *extack) { struct macsec_dev *macsec = macsec_priv(dev); rx_handler_func_t *rx_handler; u8 icv_len = MACSEC_DEFAULT_ICV_LEN; struct net_device *real_dev; int err, mtu; sci_t sci; if (!tb[IFLA_LINK]) return -EINVAL; real_dev = __dev_get_by_index(net, nla_get_u32(tb[IFLA_LINK])); if (!real_dev) return -ENODEV; if (real_dev->type != ARPHRD_ETHER) return -EINVAL; dev->priv_flags |= IFF_MACSEC; macsec->real_dev = real_dev; if (data && data[IFLA_MACSEC_OFFLOAD]) macsec->offload = nla_get_offload(data[IFLA_MACSEC_OFFLOAD]); else /* MACsec offloading is off by default */ macsec->offload = MACSEC_OFFLOAD_OFF; /* Check if the offloading mode is supported by the underlying layers */ if (macsec->offload != MACSEC_OFFLOAD_OFF && !macsec_check_offload(macsec->offload, macsec)) return -EOPNOTSUPP; /* send_sci must be set to true when transmit sci explicitly is set */ if ((data && data[IFLA_MACSEC_SCI]) && (data && data[IFLA_MACSEC_INC_SCI])) { u8 send_sci = !!nla_get_u8(data[IFLA_MACSEC_INC_SCI]); if (!send_sci) return -EINVAL; } if (data && data[IFLA_MACSEC_ICV_LEN]) icv_len = nla_get_u8(data[IFLA_MACSEC_ICV_LEN]); mtu = real_dev->mtu - icv_len - macsec_extra_len(true); if (mtu < 0) dev->mtu = 0; else dev->mtu = mtu; rx_handler = rtnl_dereference(real_dev->rx_handler); if (rx_handler && rx_handler != macsec_handle_frame) return -EBUSY; err = register_netdevice(dev); if (err < 0) return err; netdev_lockdep_set_classes(dev); lockdep_set_class(&dev->addr_list_lock, &macsec_netdev_addr_lock_key); err = netdev_upper_dev_link(real_dev, dev, extack); if (err < 0) goto unregister; /* need to be already registered so that ->init has run and * the MAC addr is set */ if (data && data[IFLA_MACSEC_SCI]) sci = nla_get_sci(data[IFLA_MACSEC_SCI]); else if (data && data[IFLA_MACSEC_PORT]) sci = dev_to_sci(dev, nla_get_be16(data[IFLA_MACSEC_PORT])); else sci = dev_to_sci(dev, MACSEC_PORT_ES); if (rx_handler && sci_exists(real_dev, sci)) { err = -EBUSY; goto unlink; } err = macsec_add_dev(dev, sci, icv_len); if (err) goto unlink; if (data) { err = macsec_changelink_common(dev, data); if (err) goto del_dev; } /* If h/w offloading is available, propagate to the device */ if (macsec_is_offloaded(macsec)) { const struct macsec_ops *ops; struct macsec_context ctx; ops = macsec_get_ops(macsec, &ctx); if (ops) { ctx.secy = &macsec->secy; err = macsec_offload(ops->mdo_add_secy, &ctx); if (err) goto del_dev; } } err = register_macsec_dev(real_dev, dev); if (err < 0) goto del_dev; netif_stacked_transfer_operstate(real_dev, dev); linkwatch_fire_event(dev); macsec_generation++; return 0; del_dev: 
macsec_del_dev(macsec); unlink: netdev_upper_dev_unlink(real_dev, dev); unregister: unregister_netdevice(dev); return err; } static int macsec_validate_attr(struct nlattr *tb[], struct nlattr *data[], struct netlink_ext_ack *extack) { u64 csid = MACSEC_DEFAULT_CIPHER_ID; u8 icv_len = MACSEC_DEFAULT_ICV_LEN; int flag; bool es, scb, sci; if (!data) return 0; if (data[IFLA_MACSEC_CIPHER_SUITE]) csid = nla_get_u64(data[IFLA_MACSEC_CIPHER_SUITE]); if (data[IFLA_MACSEC_ICV_LEN]) { icv_len = nla_get_u8(data[IFLA_MACSEC_ICV_LEN]); if (icv_len != MACSEC_DEFAULT_ICV_LEN) { char dummy_key[DEFAULT_SAK_LEN] = { 0 }; struct crypto_aead *dummy_tfm; dummy_tfm = macsec_alloc_tfm(dummy_key, DEFAULT_SAK_LEN, icv_len); if (IS_ERR(dummy_tfm)) return PTR_ERR(dummy_tfm); crypto_free_aead(dummy_tfm); } } switch (csid) { case MACSEC_CIPHER_ID_GCM_AES_128: case MACSEC_CIPHER_ID_GCM_AES_256: case MACSEC_CIPHER_ID_GCM_AES_XPN_128: case MACSEC_CIPHER_ID_GCM_AES_XPN_256: case MACSEC_DEFAULT_CIPHER_ID: if (icv_len < MACSEC_MIN_ICV_LEN || icv_len > MACSEC_STD_ICV_LEN) return -EINVAL; break; default: return -EINVAL; } if (data[IFLA_MACSEC_ENCODING_SA]) { if (nla_get_u8(data[IFLA_MACSEC_ENCODING_SA]) >= MACSEC_NUM_AN) return -EINVAL; } for (flag = IFLA_MACSEC_ENCODING_SA + 1; flag < IFLA_MACSEC_VALIDATION; flag++) { if (data[flag]) { if (nla_get_u8(data[flag]) > 1) return -EINVAL; } } es = data[IFLA_MACSEC_ES] ? nla_get_u8(data[IFLA_MACSEC_ES]) : false; sci = data[IFLA_MACSEC_INC_SCI] ? nla_get_u8(data[IFLA_MACSEC_INC_SCI]) : false; scb = data[IFLA_MACSEC_SCB] ? nla_get_u8(data[IFLA_MACSEC_SCB]) : false; if ((sci && (scb || es)) || (scb && es)) return -EINVAL; if (data[IFLA_MACSEC_VALIDATION] && nla_get_u8(data[IFLA_MACSEC_VALIDATION]) > MACSEC_VALIDATE_MAX) return -EINVAL; if ((data[IFLA_MACSEC_REPLAY_PROTECT] && nla_get_u8(data[IFLA_MACSEC_REPLAY_PROTECT])) && !data[IFLA_MACSEC_WINDOW]) return -EINVAL; return 0; } static struct net *macsec_get_link_net(const struct net_device *dev) { return dev_net(macsec_priv(dev)->real_dev); } struct net_device *macsec_get_real_dev(const struct net_device *dev) { return macsec_priv(dev)->real_dev; } EXPORT_SYMBOL_GPL(macsec_get_real_dev); bool macsec_netdev_is_offloaded(struct net_device *dev) { return macsec_is_offloaded(macsec_priv(dev)); } EXPORT_SYMBOL_GPL(macsec_netdev_is_offloaded); static size_t macsec_get_size(const struct net_device *dev) { return nla_total_size_64bit(8) + /* IFLA_MACSEC_SCI */ nla_total_size(1) + /* IFLA_MACSEC_ICV_LEN */ nla_total_size_64bit(8) + /* IFLA_MACSEC_CIPHER_SUITE */ nla_total_size(4) + /* IFLA_MACSEC_WINDOW */ nla_total_size(1) + /* IFLA_MACSEC_ENCODING_SA */ nla_total_size(1) + /* IFLA_MACSEC_ENCRYPT */ nla_total_size(1) + /* IFLA_MACSEC_PROTECT */ nla_total_size(1) + /* IFLA_MACSEC_INC_SCI */ nla_total_size(1) + /* IFLA_MACSEC_ES */ nla_total_size(1) + /* IFLA_MACSEC_SCB */ nla_total_size(1) + /* IFLA_MACSEC_REPLAY_PROTECT */ nla_total_size(1) + /* IFLA_MACSEC_VALIDATION */ nla_total_size(1) + /* IFLA_MACSEC_OFFLOAD */ 0; } static int macsec_fill_info(struct sk_buff *skb, const struct net_device *dev) { struct macsec_tx_sc *tx_sc; struct macsec_dev *macsec; struct macsec_secy *secy; u64 csid; macsec = macsec_priv(dev); secy = &macsec->secy; tx_sc = &secy->tx_sc; switch (secy->key_len) { case MACSEC_GCM_AES_128_SAK_LEN: csid = secy->xpn ? MACSEC_CIPHER_ID_GCM_AES_XPN_128 : MACSEC_DEFAULT_CIPHER_ID; break; case MACSEC_GCM_AES_256_SAK_LEN: csid = secy->xpn ? 
MACSEC_CIPHER_ID_GCM_AES_XPN_256 : MACSEC_CIPHER_ID_GCM_AES_256; break; default: goto nla_put_failure; } if (nla_put_sci(skb, IFLA_MACSEC_SCI, secy->sci, IFLA_MACSEC_PAD) || nla_put_u8(skb, IFLA_MACSEC_ICV_LEN, secy->icv_len) || nla_put_u64_64bit(skb, IFLA_MACSEC_CIPHER_SUITE, csid, IFLA_MACSEC_PAD) || nla_put_u8(skb, IFLA_MACSEC_ENCODING_SA, tx_sc->encoding_sa) || nla_put_u8(skb, IFLA_MACSEC_ENCRYPT, tx_sc->encrypt) || nla_put_u8(skb, IFLA_MACSEC_PROTECT, secy->protect_frames) || nla_put_u8(skb, IFLA_MACSEC_INC_SCI, tx_sc->send_sci) || nla_put_u8(skb, IFLA_MACSEC_ES, tx_sc->end_station) || nla_put_u8(skb, IFLA_MACSEC_SCB, tx_sc->scb) || nla_put_u8(skb, IFLA_MACSEC_REPLAY_PROTECT, secy->replay_protect) || nla_put_u8(skb, IFLA_MACSEC_VALIDATION, secy->validate_frames) || nla_put_u8(skb, IFLA_MACSEC_OFFLOAD, macsec->offload) || 0) goto nla_put_failure; if (secy->replay_protect) { if (nla_put_u32(skb, IFLA_MACSEC_WINDOW, secy->replay_window)) goto nla_put_failure; } return 0; nla_put_failure: return -EMSGSIZE; } static struct rtnl_link_ops macsec_link_ops __read_mostly = { .kind = "macsec", .priv_size = sizeof(struct macsec_dev), .maxtype = IFLA_MACSEC_MAX, .policy = macsec_rtnl_policy, .setup = macsec_setup, .validate = macsec_validate_attr, .newlink = macsec_newlink, .changelink = macsec_changelink, .dellink = macsec_dellink, .get_size = macsec_get_size, .fill_info = macsec_fill_info, .get_link_net = macsec_get_link_net, }; static bool is_macsec_master(struct net_device *dev) { return rcu_access_pointer(dev->rx_handler) == macsec_handle_frame; } static int macsec_notify(struct notifier_block *this, unsigned long event, void *ptr) { struct net_device *real_dev = netdev_notifier_info_to_dev(ptr); LIST_HEAD(head); if (!is_macsec_master(real_dev)) return NOTIFY_DONE; switch (event) { case NETDEV_DOWN: case NETDEV_UP: case NETDEV_CHANGE: { struct macsec_dev *m, *n; struct macsec_rxh_data *rxd; rxd = macsec_data_rtnl(real_dev); list_for_each_entry_safe(m, n, &rxd->secys, secys) { struct net_device *dev = m->secy.netdev; netif_stacked_transfer_operstate(real_dev, dev); } break; } case NETDEV_UNREGISTER: { struct macsec_dev *m, *n; struct macsec_rxh_data *rxd; rxd = macsec_data_rtnl(real_dev); list_for_each_entry_safe(m, n, &rxd->secys, secys) { macsec_common_dellink(m->secy.netdev, &head); } netdev_rx_handler_unregister(real_dev); kfree(rxd); unregister_netdevice_many(&head); break; } case NETDEV_CHANGEMTU: { struct macsec_dev *m; struct macsec_rxh_data *rxd; rxd = macsec_data_rtnl(real_dev); list_for_each_entry(m, &rxd->secys, secys) { struct net_device *dev = m->secy.netdev; unsigned int mtu = real_dev->mtu - (m->secy.icv_len + macsec_extra_len(true)); if (dev->mtu > mtu) dev_set_mtu(dev, mtu); } } } return NOTIFY_OK; } static struct notifier_block macsec_notifier = { .notifier_call = macsec_notify, }; static int __init macsec_init(void) { int err; pr_info("MACsec IEEE 802.1AE\n"); err = register_netdevice_notifier(&macsec_notifier); if (err) return err; err = rtnl_link_register(&macsec_link_ops); if (err) goto notifier; err = genl_register_family(&macsec_fam); if (err) goto rtnl; return 0; rtnl: rtnl_link_unregister(&macsec_link_ops); notifier: unregister_netdevice_notifier(&macsec_notifier); return err; } static void __exit macsec_exit(void) { genl_unregister_family(&macsec_fam); rtnl_link_unregister(&macsec_link_ops); unregister_netdevice_notifier(&macsec_notifier); rcu_barrier(); } module_init(macsec_init); module_exit(macsec_exit); MODULE_ALIAS_RTNL_LINK("macsec"); 
MODULE_ALIAS_GENL_FAMILY("macsec");
MODULE_DESCRIPTION("MACsec IEEE 802.1AE");
MODULE_LICENSE("GPL v2");
linux-master
drivers/net/macsec.c
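The cipher-suite handling above, in macsec_changelink_common() and macsec_fill_info(), reduces to a small mapping between the 64-bit cipher suite identifier and a (SAK length, XPN) pair. The sketch below is a minimal, self-contained illustration of that mapping and is not part of the driver; the EX_* identifiers are hypothetical placeholders standing in for the MACSEC_CIPHER_ID_* and MACSEC_DEFAULT_CIPHER_ID constants from uapi/linux/if_macsec.h, while the 16- and 32-byte key lengths correspond to GCM-AES-128 and GCM-AES-256 as used in the code above.

/*
 * Illustrative sketch only: mirrors the cipher-suite switch in
 * macsec_changelink_common() and the reverse mapping in macsec_fill_info().
 * The ID constants are placeholders, not the real uapi values.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define EX_GCM_AES_128_SAK_LEN 16 /* 128-bit SAK */
#define EX_GCM_AES_256_SAK_LEN 32 /* 256-bit SAK */

enum ex_cipher_id { /* placeholders for MACSEC_CIPHER_ID_* / MACSEC_DEFAULT_CIPHER_ID */
	EX_DEFAULT_CIPHER_ID = 1,
	EX_CIPHER_ID_GCM_AES_128,
	EX_CIPHER_ID_GCM_AES_256,
	EX_CIPHER_ID_GCM_AES_XPN_128,
	EX_CIPHER_ID_GCM_AES_XPN_256,
};

/* ID -> (key length, XPN), as in macsec_changelink_common(). */
static int ex_csid_to_params(uint64_t csid, uint8_t *key_len, bool *xpn)
{
	switch (csid) {
	case EX_CIPHER_ID_GCM_AES_128:
	case EX_DEFAULT_CIPHER_ID:
		*key_len = EX_GCM_AES_128_SAK_LEN;
		*xpn = false;
		return 0;
	case EX_CIPHER_ID_GCM_AES_256:
		*key_len = EX_GCM_AES_256_SAK_LEN;
		*xpn = false;
		return 0;
	case EX_CIPHER_ID_GCM_AES_XPN_128:
		*key_len = EX_GCM_AES_128_SAK_LEN;
		*xpn = true;
		return 0;
	case EX_CIPHER_ID_GCM_AES_XPN_256:
		*key_len = EX_GCM_AES_256_SAK_LEN;
		*xpn = true;
		return 0;
	default:
		return -1; /* the driver returns -EINVAL here */
	}
}

/*
 * (key length, XPN) -> ID, as in macsec_fill_info().  Note this is not a
 * strict inverse: a 128-bit non-XPN SecY is reported as the default cipher ID.
 */
static uint64_t ex_params_to_csid(uint8_t key_len, bool xpn)
{
	if (key_len == EX_GCM_AES_128_SAK_LEN)
		return xpn ? EX_CIPHER_ID_GCM_AES_XPN_128 : EX_DEFAULT_CIPHER_ID;
	return xpn ? EX_CIPHER_ID_GCM_AES_XPN_256 : EX_CIPHER_ID_GCM_AES_256;
}

int main(void)
{
	uint8_t key_len;
	bool xpn;

	if (!ex_csid_to_params(EX_CIPHER_ID_GCM_AES_XPN_256, &key_len, &xpn))
		printf("XPN-256: key_len=%u xpn=%d -> reported csid=%llu\n",
		       (unsigned)key_len, (int)xpn,
		       (unsigned long long)ex_params_to_csid(key_len, xpn));
	return 0;
}

In the driver, the same (key length, XPN) pair derived here also gates the XPN-specific limit on IFLA_MACSEC_WINDOW: when secy->xpn is set, a replay window larger than MACSEC_XPN_MAX_REPLAY_WINDOW is rejected with -EINVAL.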
// SPDX-License-Identifier: GPL-2.0-or-later /* A network driver using virtio. * * Copyright 2007 Rusty Russell <[email protected]> IBM Corporation */ //#define DEBUG #include <linux/netdevice.h> #include <linux/etherdevice.h> #include <linux/ethtool.h> #include <linux/module.h> #include <linux/virtio.h> #include <linux/virtio_net.h> #include <linux/bpf.h> #include <linux/bpf_trace.h> #include <linux/scatterlist.h> #include <linux/if_vlan.h> #include <linux/slab.h> #include <linux/cpu.h> #include <linux/average.h> #include <linux/filter.h> #include <linux/kernel.h> #include <net/route.h> #include <net/xdp.h> #include <net/net_failover.h> #include <net/netdev_rx_queue.h> static int napi_weight = NAPI_POLL_WEIGHT; module_param(napi_weight, int, 0444); static bool csum = true, gso = true, napi_tx = true; module_param(csum, bool, 0444); module_param(gso, bool, 0444); module_param(napi_tx, bool, 0644); /* FIXME: MTU in config. */ #define GOOD_PACKET_LEN (ETH_HLEN + VLAN_HLEN + ETH_DATA_LEN) #define GOOD_COPY_LEN 128 #define VIRTNET_RX_PAD (NET_IP_ALIGN + NET_SKB_PAD) /* Amount of XDP headroom to prepend to packets for use by xdp_adjust_head */ #define VIRTIO_XDP_HEADROOM 256 /* Separating two types of XDP xmit */ #define VIRTIO_XDP_TX BIT(0) #define VIRTIO_XDP_REDIR BIT(1) #define VIRTIO_XDP_FLAG BIT(0) /* RX packet size EWMA. The average packet size is used to determine the packet * buffer size when refilling RX rings. As the entire RX ring may be refilled * at once, the weight is chosen so that the EWMA will be insensitive to short- * term, transient changes in packet size. */ DECLARE_EWMA(pkt_len, 0, 64) #define VIRTNET_DRIVER_VERSION "1.0.0" static const unsigned long guest_offloads[] = { VIRTIO_NET_F_GUEST_TSO4, VIRTIO_NET_F_GUEST_TSO6, VIRTIO_NET_F_GUEST_ECN, VIRTIO_NET_F_GUEST_UFO, VIRTIO_NET_F_GUEST_CSUM, VIRTIO_NET_F_GUEST_USO4, VIRTIO_NET_F_GUEST_USO6, VIRTIO_NET_F_GUEST_HDRLEN }; #define GUEST_OFFLOAD_GRO_HW_MASK ((1ULL << VIRTIO_NET_F_GUEST_TSO4) | \ (1ULL << VIRTIO_NET_F_GUEST_TSO6) | \ (1ULL << VIRTIO_NET_F_GUEST_ECN) | \ (1ULL << VIRTIO_NET_F_GUEST_UFO) | \ (1ULL << VIRTIO_NET_F_GUEST_USO4) | \ (1ULL << VIRTIO_NET_F_GUEST_USO6)) struct virtnet_stat_desc { char desc[ETH_GSTRING_LEN]; size_t offset; }; struct virtnet_sq_stats { struct u64_stats_sync syncp; u64 packets; u64 bytes; u64 xdp_tx; u64 xdp_tx_drops; u64 kicks; u64 tx_timeouts; }; struct virtnet_rq_stats { struct u64_stats_sync syncp; u64 packets; u64 bytes; u64 drops; u64 xdp_packets; u64 xdp_tx; u64 xdp_redirects; u64 xdp_drops; u64 kicks; }; #define VIRTNET_SQ_STAT(m) offsetof(struct virtnet_sq_stats, m) #define VIRTNET_RQ_STAT(m) offsetof(struct virtnet_rq_stats, m) static const struct virtnet_stat_desc virtnet_sq_stats_desc[] = { { "packets", VIRTNET_SQ_STAT(packets) }, { "bytes", VIRTNET_SQ_STAT(bytes) }, { "xdp_tx", VIRTNET_SQ_STAT(xdp_tx) }, { "xdp_tx_drops", VIRTNET_SQ_STAT(xdp_tx_drops) }, { "kicks", VIRTNET_SQ_STAT(kicks) }, { "tx_timeouts", VIRTNET_SQ_STAT(tx_timeouts) }, }; static const struct virtnet_stat_desc virtnet_rq_stats_desc[] = { { "packets", VIRTNET_RQ_STAT(packets) }, { "bytes", VIRTNET_RQ_STAT(bytes) }, { "drops", VIRTNET_RQ_STAT(drops) }, { "xdp_packets", VIRTNET_RQ_STAT(xdp_packets) }, { "xdp_tx", VIRTNET_RQ_STAT(xdp_tx) }, { "xdp_redirects", VIRTNET_RQ_STAT(xdp_redirects) }, { "xdp_drops", VIRTNET_RQ_STAT(xdp_drops) }, { "kicks", VIRTNET_RQ_STAT(kicks) }, }; #define VIRTNET_SQ_STATS_LEN ARRAY_SIZE(virtnet_sq_stats_desc) #define VIRTNET_RQ_STATS_LEN ARRAY_SIZE(virtnet_rq_stats_desc) struct 
virtnet_interrupt_coalesce { u32 max_packets; u32 max_usecs; }; /* The dma information of pages allocated at a time. */ struct virtnet_rq_dma { dma_addr_t addr; u32 ref; u16 len; u16 need_sync; }; /* Internal representation of a send virtqueue */ struct send_queue { /* Virtqueue associated with this send _queue */ struct virtqueue *vq; /* TX: fragments + linear part + virtio header */ struct scatterlist sg[MAX_SKB_FRAGS + 2]; /* Name of the send queue: output.$index */ char name[16]; struct virtnet_sq_stats stats; struct virtnet_interrupt_coalesce intr_coal; struct napi_struct napi; /* Record whether sq is in reset state. */ bool reset; }; /* Internal representation of a receive virtqueue */ struct receive_queue { /* Virtqueue associated with this receive_queue */ struct virtqueue *vq; struct napi_struct napi; struct bpf_prog __rcu *xdp_prog; struct virtnet_rq_stats stats; struct virtnet_interrupt_coalesce intr_coal; /* Chain pages by the private ptr. */ struct page *pages; /* Average packet length for mergeable receive buffers. */ struct ewma_pkt_len mrg_avg_pkt_len; /* Page frag for packet buffer allocation. */ struct page_frag alloc_frag; /* RX: fragments + linear part + virtio header */ struct scatterlist sg[MAX_SKB_FRAGS + 2]; /* Min single buffer size for mergeable buffers case. */ unsigned int min_buf_len; /* Name of this receive queue: input.$index */ char name[16]; struct xdp_rxq_info xdp_rxq; /* Record the last dma info to free after new pages is allocated. */ struct virtnet_rq_dma *last_dma; /* Do dma by self */ bool do_dma; }; /* This structure can contain rss message with maximum settings for indirection table and keysize * Note, that default structure that describes RSS configuration virtio_net_rss_config * contains same info but can't handle table values. * In any case, structure would be passed to virtio hw through sg_buf split by parts * because table sizes may be differ according to the device configuration. */ #define VIRTIO_NET_RSS_MAX_KEY_SIZE 40 #define VIRTIO_NET_RSS_MAX_TABLE_LEN 128 struct virtio_net_ctrl_rss { u32 hash_types; u16 indirection_table_mask; u16 unclassified_queue; u16 indirection_table[VIRTIO_NET_RSS_MAX_TABLE_LEN]; u16 max_tx_vq; u8 hash_key_length; u8 key[VIRTIO_NET_RSS_MAX_KEY_SIZE]; }; /* Control VQ buffers: protected by the rtnl lock */ struct control_buf { struct virtio_net_ctrl_hdr hdr; virtio_net_ctrl_ack status; struct virtio_net_ctrl_mq mq; u8 promisc; u8 allmulti; __virtio16 vid; __virtio64 offloads; struct virtio_net_ctrl_rss rss; struct virtio_net_ctrl_coal_tx coal_tx; struct virtio_net_ctrl_coal_rx coal_rx; struct virtio_net_ctrl_coal_vq coal_vq; }; struct virtnet_info { struct virtio_device *vdev; struct virtqueue *cvq; struct net_device *dev; struct send_queue *sq; struct receive_queue *rq; unsigned int status; /* Max # of queue pairs supported by the device */ u16 max_queue_pairs; /* # of queue pairs currently used by the driver */ u16 curr_queue_pairs; /* # of XDP queue pairs currently used by the driver */ u16 xdp_queue_pairs; /* xdp_queue_pairs may be 0, when xdp is already loaded. So add this. */ bool xdp_enabled; /* I like... big packets and I cannot lie! */ bool big_packets; /* number of sg entries allocated for big packets */ unsigned int big_packets_num_skbfrags; /* Host will merge rx buffers for big packets (shake it! shake it!) 
*/ bool mergeable_rx_bufs; /* Host supports rss and/or hash report */ bool has_rss; bool has_rss_hash_report; u8 rss_key_size; u16 rss_indir_table_size; u32 rss_hash_types_supported; u32 rss_hash_types_saved; /* Has control virtqueue */ bool has_cvq; /* Host can handle any s/g split between our header and packet data */ bool any_header_sg; /* Packet virtio header size */ u8 hdr_len; /* Work struct for delayed refilling if we run low on memory. */ struct delayed_work refill; /* Is delayed refill enabled? */ bool refill_enabled; /* The lock to synchronize the access to refill_enabled */ spinlock_t refill_lock; /* Work struct for config space updates */ struct work_struct config_work; /* Does the affinity hint is set for virtqueues? */ bool affinity_hint_set; /* CPU hotplug instances for online & dead */ struct hlist_node node; struct hlist_node node_dead; struct control_buf *ctrl; /* Ethtool settings */ u8 duplex; u32 speed; /* Interrupt coalescing settings */ struct virtnet_interrupt_coalesce intr_coal_tx; struct virtnet_interrupt_coalesce intr_coal_rx; unsigned long guest_offloads; unsigned long guest_offloads_capable; /* failover when STANDBY feature enabled */ struct failover *failover; }; struct padded_vnet_hdr { struct virtio_net_hdr_v1_hash hdr; /* * hdr is in a separate sg buffer, and data sg buffer shares same page * with this header sg. This padding makes next sg 16 byte aligned * after the header. */ char padding[12]; }; struct virtio_net_common_hdr { union { struct virtio_net_hdr hdr; struct virtio_net_hdr_mrg_rxbuf mrg_hdr; struct virtio_net_hdr_v1_hash hash_v1_hdr; }; }; static void virtnet_rq_free_unused_buf(struct virtqueue *vq, void *buf); static void virtnet_sq_free_unused_buf(struct virtqueue *vq, void *buf); static bool is_xdp_frame(void *ptr) { return (unsigned long)ptr & VIRTIO_XDP_FLAG; } static void *xdp_to_ptr(struct xdp_frame *ptr) { return (void *)((unsigned long)ptr | VIRTIO_XDP_FLAG); } static struct xdp_frame *ptr_to_xdp(void *ptr) { return (struct xdp_frame *)((unsigned long)ptr & ~VIRTIO_XDP_FLAG); } /* Converting between virtqueue no. and kernel tx/rx queue no. * 0:rx0 1:tx0 2:rx1 3:tx1 ... 2N:rxN 2N+1:txN 2N+2:cvq */ static int vq2txq(struct virtqueue *vq) { return (vq->index - 1) / 2; } static int txq2vq(int txq) { return txq * 2 + 1; } static int vq2rxq(struct virtqueue *vq) { return vq->index / 2; } static int rxq2vq(int rxq) { return rxq * 2; } static inline struct virtio_net_common_hdr * skb_vnet_common_hdr(struct sk_buff *skb) { return (struct virtio_net_common_hdr *)skb->cb; } /* * private is used to chain pages for big packets, put the whole * most recent used list in the beginning for reuse */ static void give_pages(struct receive_queue *rq, struct page *page) { struct page *end; /* Find end of list, sew whole thing into vi->rq.pages. 
*/ for (end = page; end->private; end = (struct page *)end->private); end->private = (unsigned long)rq->pages; rq->pages = page; } static struct page *get_a_page(struct receive_queue *rq, gfp_t gfp_mask) { struct page *p = rq->pages; if (p) { rq->pages = (struct page *)p->private; /* clear private here, it is used to chain pages */ p->private = 0; } else p = alloc_page(gfp_mask); return p; } static void enable_delayed_refill(struct virtnet_info *vi) { spin_lock_bh(&vi->refill_lock); vi->refill_enabled = true; spin_unlock_bh(&vi->refill_lock); } static void disable_delayed_refill(struct virtnet_info *vi) { spin_lock_bh(&vi->refill_lock); vi->refill_enabled = false; spin_unlock_bh(&vi->refill_lock); } static void virtqueue_napi_schedule(struct napi_struct *napi, struct virtqueue *vq) { if (napi_schedule_prep(napi)) { virtqueue_disable_cb(vq); __napi_schedule(napi); } } static void virtqueue_napi_complete(struct napi_struct *napi, struct virtqueue *vq, int processed) { int opaque; opaque = virtqueue_enable_cb_prepare(vq); if (napi_complete_done(napi, processed)) { if (unlikely(virtqueue_poll(vq, opaque))) virtqueue_napi_schedule(napi, vq); } else { virtqueue_disable_cb(vq); } } static void skb_xmit_done(struct virtqueue *vq) { struct virtnet_info *vi = vq->vdev->priv; struct napi_struct *napi = &vi->sq[vq2txq(vq)].napi; /* Suppress further interrupts. */ virtqueue_disable_cb(vq); if (napi->weight) virtqueue_napi_schedule(napi, vq); else /* We were probably waiting for more output buffers. */ netif_wake_subqueue(vi->dev, vq2txq(vq)); } #define MRG_CTX_HEADER_SHIFT 22 static void *mergeable_len_to_ctx(unsigned int truesize, unsigned int headroom) { return (void *)(unsigned long)((headroom << MRG_CTX_HEADER_SHIFT) | truesize); } static unsigned int mergeable_ctx_to_headroom(void *mrg_ctx) { return (unsigned long)mrg_ctx >> MRG_CTX_HEADER_SHIFT; } static unsigned int mergeable_ctx_to_truesize(void *mrg_ctx) { return (unsigned long)mrg_ctx & ((1 << MRG_CTX_HEADER_SHIFT) - 1); } static struct sk_buff *virtnet_build_skb(void *buf, unsigned int buflen, unsigned int headroom, unsigned int len) { struct sk_buff *skb; skb = build_skb(buf, buflen); if (unlikely(!skb)) return NULL; skb_reserve(skb, headroom); skb_put(skb, len); return skb; } /* Called from bottom half context */ static struct sk_buff *page_to_skb(struct virtnet_info *vi, struct receive_queue *rq, struct page *page, unsigned int offset, unsigned int len, unsigned int truesize, unsigned int headroom) { struct sk_buff *skb; struct virtio_net_common_hdr *hdr; unsigned int copy, hdr_len, hdr_padded_len; struct page *page_to_free = NULL; int tailroom, shinfo_size; char *p, *hdr_p, *buf; p = page_address(page) + offset; hdr_p = p; hdr_len = vi->hdr_len; if (vi->mergeable_rx_bufs) hdr_padded_len = hdr_len; else hdr_padded_len = sizeof(struct padded_vnet_hdr); buf = p - headroom; len -= hdr_len; offset += hdr_padded_len; p += hdr_padded_len; tailroom = truesize - headroom - hdr_padded_len - len; shinfo_size = SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); /* copy small packet so we can reuse these pages */ if (!NET_IP_ALIGN && len > GOOD_COPY_LEN && tailroom >= shinfo_size) { skb = virtnet_build_skb(buf, truesize, p - buf, len); if (unlikely(!skb)) return NULL; page = (struct page *)page->private; if (page) give_pages(rq, page); goto ok; } /* copy small packet so we can reuse these pages for small data */ skb = napi_alloc_skb(&rq->napi, GOOD_COPY_LEN); if (unlikely(!skb)) return NULL; /* Copy all frame if it fits skb->head, otherwise * we let 
virtio_net_hdr_to_skb() and GRO pull headers as needed. */ if (len <= skb_tailroom(skb)) copy = len; else copy = ETH_HLEN; skb_put_data(skb, p, copy); len -= copy; offset += copy; if (vi->mergeable_rx_bufs) { if (len) skb_add_rx_frag(skb, 0, page, offset, len, truesize); else page_to_free = page; goto ok; } /* * Verify that we can indeed put this data into a skb. * This is here to handle cases when the device erroneously * tries to receive more than is possible. This is usually * the case of a broken device. */ if (unlikely(len > MAX_SKB_FRAGS * PAGE_SIZE)) { net_dbg_ratelimited("%s: too much data\n", skb->dev->name); dev_kfree_skb(skb); return NULL; } BUG_ON(offset >= PAGE_SIZE); while (len) { unsigned int frag_size = min((unsigned)PAGE_SIZE - offset, len); skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page, offset, frag_size, truesize); len -= frag_size; page = (struct page *)page->private; offset = 0; } if (page) give_pages(rq, page); ok: hdr = skb_vnet_common_hdr(skb); memcpy(hdr, hdr_p, hdr_len); if (page_to_free) put_page(page_to_free); return skb; } static void virtnet_rq_unmap(struct receive_queue *rq, void *buf, u32 len) { struct page *page = virt_to_head_page(buf); struct virtnet_rq_dma *dma; void *head; int offset; head = page_address(page); dma = head; --dma->ref; if (dma->ref) { if (dma->need_sync && len) { offset = buf - (head + sizeof(*dma)); virtqueue_dma_sync_single_range_for_cpu(rq->vq, dma->addr, offset, len, DMA_FROM_DEVICE); } return; } virtqueue_dma_unmap_single_attrs(rq->vq, dma->addr, dma->len, DMA_FROM_DEVICE, DMA_ATTR_SKIP_CPU_SYNC); put_page(page); } static void *virtnet_rq_get_buf(struct receive_queue *rq, u32 *len, void **ctx) { void *buf; buf = virtqueue_get_buf_ctx(rq->vq, len, ctx); if (buf && rq->do_dma) virtnet_rq_unmap(rq, buf, *len); return buf; } static void *virtnet_rq_detach_unused_buf(struct receive_queue *rq) { void *buf; buf = virtqueue_detach_unused_buf(rq->vq); if (buf && rq->do_dma) virtnet_rq_unmap(rq, buf, 0); return buf; } static void virtnet_rq_init_one_sg(struct receive_queue *rq, void *buf, u32 len) { struct virtnet_rq_dma *dma; dma_addr_t addr; u32 offset; void *head; if (!rq->do_dma) { sg_init_one(rq->sg, buf, len); return; } head = page_address(rq->alloc_frag.page); offset = buf - head; dma = head; addr = dma->addr - sizeof(*dma) + offset; sg_init_table(rq->sg, 1); rq->sg[0].dma_address = addr; rq->sg[0].length = len; } static void *virtnet_rq_alloc(struct receive_queue *rq, u32 size, gfp_t gfp) { struct page_frag *alloc_frag = &rq->alloc_frag; struct virtnet_rq_dma *dma; void *buf, *head; dma_addr_t addr; if (unlikely(!skb_page_frag_refill(size, alloc_frag, gfp))) return NULL; head = page_address(alloc_frag->page); if (rq->do_dma) { dma = head; /* new pages */ if (!alloc_frag->offset) { if (rq->last_dma) { /* Now, the new page is allocated, the last dma * will not be used. So the dma can be unmapped * if the ref is 0. */ virtnet_rq_unmap(rq, rq->last_dma, 0); rq->last_dma = NULL; } dma->len = alloc_frag->size - sizeof(*dma); addr = virtqueue_dma_map_single_attrs(rq->vq, dma + 1, dma->len, DMA_FROM_DEVICE, 0); if (virtqueue_dma_mapping_error(rq->vq, addr)) return NULL; dma->addr = addr; dma->need_sync = virtqueue_dma_need_sync(rq->vq, addr); /* Add a reference to dma to prevent the entire dma from * being released during error handling. This reference * will be freed after the pages are no longer used. 
*/ get_page(alloc_frag->page); dma->ref = 1; alloc_frag->offset = sizeof(*dma); rq->last_dma = dma; } ++dma->ref; } buf = head + alloc_frag->offset; get_page(alloc_frag->page); alloc_frag->offset += size; return buf; } static void virtnet_rq_set_premapped(struct virtnet_info *vi) { int i; /* disable for big mode */ if (!vi->mergeable_rx_bufs && vi->big_packets) return; for (i = 0; i < vi->max_queue_pairs; i++) { if (virtqueue_set_dma_premapped(vi->rq[i].vq)) continue; vi->rq[i].do_dma = true; } } static void free_old_xmit_skbs(struct send_queue *sq, bool in_napi) { unsigned int len; unsigned int packets = 0; unsigned int bytes = 0; void *ptr; while ((ptr = virtqueue_get_buf(sq->vq, &len)) != NULL) { if (likely(!is_xdp_frame(ptr))) { struct sk_buff *skb = ptr; pr_debug("Sent skb %p\n", skb); bytes += skb->len; napi_consume_skb(skb, in_napi); } else { struct xdp_frame *frame = ptr_to_xdp(ptr); bytes += xdp_get_frame_len(frame); xdp_return_frame(frame); } packets++; } /* Avoid overhead when no packets have been processed * happens when called speculatively from start_xmit. */ if (!packets) return; u64_stats_update_begin(&sq->stats.syncp); sq->stats.bytes += bytes; sq->stats.packets += packets; u64_stats_update_end(&sq->stats.syncp); } static bool is_xdp_raw_buffer_queue(struct virtnet_info *vi, int q) { if (q < (vi->curr_queue_pairs - vi->xdp_queue_pairs)) return false; else if (q < vi->curr_queue_pairs) return true; else return false; } static void check_sq_full_and_disable(struct virtnet_info *vi, struct net_device *dev, struct send_queue *sq) { bool use_napi = sq->napi.weight; int qnum; qnum = sq - vi->sq; /* If running out of space, stop queue to avoid getting packets that we * are then unable to transmit. * An alternative would be to force queuing layer to requeue the skb by * returning NETDEV_TX_BUSY. However, NETDEV_TX_BUSY should not be * returned in a normal path of operation: it means that driver is not * maintaining the TX queue stop/start state properly, and causes * the stack to do a non-trivial amount of useless work. * Since most packets only take 1 or 2 ring slots, stopping the queue * early means 16 slots are typically wasted. */ if (sq->vq->num_free < 2+MAX_SKB_FRAGS) { netif_stop_subqueue(dev, qnum); if (use_napi) { if (unlikely(!virtqueue_enable_cb_delayed(sq->vq))) virtqueue_napi_schedule(&sq->napi, sq->vq); } else if (unlikely(!virtqueue_enable_cb_delayed(sq->vq))) { /* More just got used, free them then recheck. */ free_old_xmit_skbs(sq, false); if (sq->vq->num_free >= 2+MAX_SKB_FRAGS) { netif_start_subqueue(dev, qnum); virtqueue_disable_cb(sq->vq); } } } } static int __virtnet_xdp_xmit_one(struct virtnet_info *vi, struct send_queue *sq, struct xdp_frame *xdpf) { struct virtio_net_hdr_mrg_rxbuf *hdr; struct skb_shared_info *shinfo; u8 nr_frags = 0; int err, i; if (unlikely(xdpf->headroom < vi->hdr_len)) return -EOVERFLOW; if (unlikely(xdp_frame_has_frags(xdpf))) { shinfo = xdp_get_shared_info_from_frame(xdpf); nr_frags = shinfo->nr_frags; } /* In wrapping function virtnet_xdp_xmit(), we need to free * up the pending old buffers, where we need to calculate the * position of skb_shared_info in xdp_get_frame_len() and * xdp_return_frame(), which will involve to xdpf->data and * xdpf->headroom. Therefore, we need to update the value of * headroom synchronously here. 
*/ xdpf->headroom -= vi->hdr_len; xdpf->data -= vi->hdr_len; /* Zero header and leave csum up to XDP layers */ hdr = xdpf->data; memset(hdr, 0, vi->hdr_len); xdpf->len += vi->hdr_len; sg_init_table(sq->sg, nr_frags + 1); sg_set_buf(sq->sg, xdpf->data, xdpf->len); for (i = 0; i < nr_frags; i++) { skb_frag_t *frag = &shinfo->frags[i]; sg_set_page(&sq->sg[i + 1], skb_frag_page(frag), skb_frag_size(frag), skb_frag_off(frag)); } err = virtqueue_add_outbuf(sq->vq, sq->sg, nr_frags + 1, xdp_to_ptr(xdpf), GFP_ATOMIC); if (unlikely(err)) return -ENOSPC; /* Caller handle free/refcnt */ return 0; } /* when vi->curr_queue_pairs > nr_cpu_ids, the txq/sq is only used for xdp tx on * the current cpu, so it does not need to be locked. * * Here we use marco instead of inline functions because we have to deal with * three issues at the same time: 1. the choice of sq. 2. judge and execute the * lock/unlock of txq 3. make sparse happy. It is difficult for two inline * functions to perfectly solve these three problems at the same time. */ #define virtnet_xdp_get_sq(vi) ({ \ int cpu = smp_processor_id(); \ struct netdev_queue *txq; \ typeof(vi) v = (vi); \ unsigned int qp; \ \ if (v->curr_queue_pairs > nr_cpu_ids) { \ qp = v->curr_queue_pairs - v->xdp_queue_pairs; \ qp += cpu; \ txq = netdev_get_tx_queue(v->dev, qp); \ __netif_tx_acquire(txq); \ } else { \ qp = cpu % v->curr_queue_pairs; \ txq = netdev_get_tx_queue(v->dev, qp); \ __netif_tx_lock(txq, cpu); \ } \ v->sq + qp; \ }) #define virtnet_xdp_put_sq(vi, q) { \ struct netdev_queue *txq; \ typeof(vi) v = (vi); \ \ txq = netdev_get_tx_queue(v->dev, (q) - v->sq); \ if (v->curr_queue_pairs > nr_cpu_ids) \ __netif_tx_release(txq); \ else \ __netif_tx_unlock(txq); \ } static int virtnet_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames, u32 flags) { struct virtnet_info *vi = netdev_priv(dev); struct receive_queue *rq = vi->rq; struct bpf_prog *xdp_prog; struct send_queue *sq; unsigned int len; int packets = 0; int bytes = 0; int nxmit = 0; int kicks = 0; void *ptr; int ret; int i; /* Only allow ndo_xdp_xmit if XDP is loaded on dev, as this * indicate XDP resources have been successfully allocated. */ xdp_prog = rcu_access_pointer(rq->xdp_prog); if (!xdp_prog) return -ENXIO; sq = virtnet_xdp_get_sq(vi); if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK)) { ret = -EINVAL; goto out; } /* Free up any pending old buffers before queueing new ones. 
*/ while ((ptr = virtqueue_get_buf(sq->vq, &len)) != NULL) { if (likely(is_xdp_frame(ptr))) { struct xdp_frame *frame = ptr_to_xdp(ptr); bytes += xdp_get_frame_len(frame); xdp_return_frame(frame); } else { struct sk_buff *skb = ptr; bytes += skb->len; napi_consume_skb(skb, false); } packets++; } for (i = 0; i < n; i++) { struct xdp_frame *xdpf = frames[i]; if (__virtnet_xdp_xmit_one(vi, sq, xdpf)) break; nxmit++; } ret = nxmit; if (!is_xdp_raw_buffer_queue(vi, sq - vi->sq)) check_sq_full_and_disable(vi, dev, sq); if (flags & XDP_XMIT_FLUSH) { if (virtqueue_kick_prepare(sq->vq) && virtqueue_notify(sq->vq)) kicks = 1; } out: u64_stats_update_begin(&sq->stats.syncp); sq->stats.bytes += bytes; sq->stats.packets += packets; sq->stats.xdp_tx += n; sq->stats.xdp_tx_drops += n - nxmit; sq->stats.kicks += kicks; u64_stats_update_end(&sq->stats.syncp); virtnet_xdp_put_sq(vi, sq); return ret; } static void put_xdp_frags(struct xdp_buff *xdp) { struct skb_shared_info *shinfo; struct page *xdp_page; int i; if (xdp_buff_has_frags(xdp)) { shinfo = xdp_get_shared_info_from_buff(xdp); for (i = 0; i < shinfo->nr_frags; i++) { xdp_page = skb_frag_page(&shinfo->frags[i]); put_page(xdp_page); } } } static int virtnet_xdp_handler(struct bpf_prog *xdp_prog, struct xdp_buff *xdp, struct net_device *dev, unsigned int *xdp_xmit, struct virtnet_rq_stats *stats) { struct xdp_frame *xdpf; int err; u32 act; act = bpf_prog_run_xdp(xdp_prog, xdp); stats->xdp_packets++; switch (act) { case XDP_PASS: return act; case XDP_TX: stats->xdp_tx++; xdpf = xdp_convert_buff_to_frame(xdp); if (unlikely(!xdpf)) { netdev_dbg(dev, "convert buff to frame failed for xdp\n"); return XDP_DROP; } err = virtnet_xdp_xmit(dev, 1, &xdpf, 0); if (unlikely(!err)) { xdp_return_frame_rx_napi(xdpf); } else if (unlikely(err < 0)) { trace_xdp_exception(dev, xdp_prog, act); return XDP_DROP; } *xdp_xmit |= VIRTIO_XDP_TX; return act; case XDP_REDIRECT: stats->xdp_redirects++; err = xdp_do_redirect(dev, xdp, xdp_prog); if (err) return XDP_DROP; *xdp_xmit |= VIRTIO_XDP_REDIR; return act; default: bpf_warn_invalid_xdp_action(dev, xdp_prog, act); fallthrough; case XDP_ABORTED: trace_xdp_exception(dev, xdp_prog, act); fallthrough; case XDP_DROP: return XDP_DROP; } } static unsigned int virtnet_get_headroom(struct virtnet_info *vi) { return vi->xdp_enabled ? VIRTIO_XDP_HEADROOM : 0; } /* We copy the packet for XDP in the following cases: * * 1) Packet is scattered across multiple rx buffers. * 2) Headroom space is insufficient. * * This is inefficient but it's a temporary condition that * we hit right after XDP is enabled and until queue is refilled * with large buffers with sufficient headroom - so it should affect * at most queue size packets. * Afterwards, the conditions to enable * XDP should preclude the underlying device from sending packets * across multiple buffers (num_buf > 1), and we make sure buffers * have enough headroom. 
*/ static struct page *xdp_linearize_page(struct receive_queue *rq, int *num_buf, struct page *p, int offset, int page_off, unsigned int *len) { int tailroom = SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); struct page *page; if (page_off + *len + tailroom > PAGE_SIZE) return NULL; page = alloc_page(GFP_ATOMIC); if (!page) return NULL; memcpy(page_address(page) + page_off, page_address(p) + offset, *len); page_off += *len; while (--*num_buf) { unsigned int buflen; void *buf; int off; buf = virtnet_rq_get_buf(rq, &buflen, NULL); if (unlikely(!buf)) goto err_buf; p = virt_to_head_page(buf); off = buf - page_address(p); /* guard against a misconfigured or uncooperative backend that * is sending packet larger than the MTU. */ if ((page_off + buflen + tailroom) > PAGE_SIZE) { put_page(p); goto err_buf; } memcpy(page_address(page) + page_off, page_address(p) + off, buflen); page_off += buflen; put_page(p); } /* Headroom does not contribute to packet length */ *len = page_off - VIRTIO_XDP_HEADROOM; return page; err_buf: __free_pages(page, 0); return NULL; } static struct sk_buff *receive_small_build_skb(struct virtnet_info *vi, unsigned int xdp_headroom, void *buf, unsigned int len) { unsigned int header_offset; unsigned int headroom; unsigned int buflen; struct sk_buff *skb; header_offset = VIRTNET_RX_PAD + xdp_headroom; headroom = vi->hdr_len + header_offset; buflen = SKB_DATA_ALIGN(GOOD_PACKET_LEN + headroom) + SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); skb = virtnet_build_skb(buf, buflen, headroom, len); if (unlikely(!skb)) return NULL; buf += header_offset; memcpy(skb_vnet_common_hdr(skb), buf, vi->hdr_len); return skb; } static struct sk_buff *receive_small_xdp(struct net_device *dev, struct virtnet_info *vi, struct receive_queue *rq, struct bpf_prog *xdp_prog, void *buf, unsigned int xdp_headroom, unsigned int len, unsigned int *xdp_xmit, struct virtnet_rq_stats *stats) { unsigned int header_offset = VIRTNET_RX_PAD + xdp_headroom; unsigned int headroom = vi->hdr_len + header_offset; struct virtio_net_hdr_mrg_rxbuf *hdr = buf + header_offset; struct page *page = virt_to_head_page(buf); struct page *xdp_page; unsigned int buflen; struct xdp_buff xdp; struct sk_buff *skb; unsigned int metasize = 0; u32 act; if (unlikely(hdr->hdr.gso_type)) goto err_xdp; buflen = SKB_DATA_ALIGN(GOOD_PACKET_LEN + headroom) + SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); if (unlikely(xdp_headroom < virtnet_get_headroom(vi))) { int offset = buf - page_address(page) + header_offset; unsigned int tlen = len + vi->hdr_len; int num_buf = 1; xdp_headroom = virtnet_get_headroom(vi); header_offset = VIRTNET_RX_PAD + xdp_headroom; headroom = vi->hdr_len + header_offset; buflen = SKB_DATA_ALIGN(GOOD_PACKET_LEN + headroom) + SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); xdp_page = xdp_linearize_page(rq, &num_buf, page, offset, header_offset, &tlen); if (!xdp_page) goto err_xdp; buf = page_address(xdp_page); put_page(page); page = xdp_page; } xdp_init_buff(&xdp, buflen, &rq->xdp_rxq); xdp_prepare_buff(&xdp, buf + VIRTNET_RX_PAD + vi->hdr_len, xdp_headroom, len, true); act = virtnet_xdp_handler(xdp_prog, &xdp, dev, xdp_xmit, stats); switch (act) { case XDP_PASS: /* Recalculate length in case bpf program changed it */ len = xdp.data_end - xdp.data; metasize = xdp.data - xdp.data_meta; break; case XDP_TX: case XDP_REDIRECT: goto xdp_xmit; default: goto err_xdp; } skb = virtnet_build_skb(buf, buflen, xdp.data - buf, len); if (unlikely(!skb)) goto err; if (metasize) skb_metadata_set(skb, metasize); return skb; err_xdp: 
stats->xdp_drops++; err: stats->drops++; put_page(page); xdp_xmit: return NULL; } static struct sk_buff *receive_small(struct net_device *dev, struct virtnet_info *vi, struct receive_queue *rq, void *buf, void *ctx, unsigned int len, unsigned int *xdp_xmit, struct virtnet_rq_stats *stats) { unsigned int xdp_headroom = (unsigned long)ctx; struct page *page = virt_to_head_page(buf); struct sk_buff *skb; len -= vi->hdr_len; stats->bytes += len; if (unlikely(len > GOOD_PACKET_LEN)) { pr_debug("%s: rx error: len %u exceeds max size %d\n", dev->name, len, GOOD_PACKET_LEN); dev->stats.rx_length_errors++; goto err; } if (unlikely(vi->xdp_enabled)) { struct bpf_prog *xdp_prog; rcu_read_lock(); xdp_prog = rcu_dereference(rq->xdp_prog); if (xdp_prog) { skb = receive_small_xdp(dev, vi, rq, xdp_prog, buf, xdp_headroom, len, xdp_xmit, stats); rcu_read_unlock(); return skb; } rcu_read_unlock(); } skb = receive_small_build_skb(vi, xdp_headroom, buf, len); if (likely(skb)) return skb; err: stats->drops++; put_page(page); return NULL; } static struct sk_buff *receive_big(struct net_device *dev, struct virtnet_info *vi, struct receive_queue *rq, void *buf, unsigned int len, struct virtnet_rq_stats *stats) { struct page *page = buf; struct sk_buff *skb = page_to_skb(vi, rq, page, 0, len, PAGE_SIZE, 0); stats->bytes += len - vi->hdr_len; if (unlikely(!skb)) goto err; return skb; err: stats->drops++; give_pages(rq, page); return NULL; } static void mergeable_buf_free(struct receive_queue *rq, int num_buf, struct net_device *dev, struct virtnet_rq_stats *stats) { struct page *page; void *buf; int len; while (num_buf-- > 1) { buf = virtnet_rq_get_buf(rq, &len, NULL); if (unlikely(!buf)) { pr_debug("%s: rx error: %d buffers missing\n", dev->name, num_buf); dev->stats.rx_length_errors++; break; } stats->bytes += len; page = virt_to_head_page(buf); put_page(page); } } /* Why not use xdp_build_skb_from_frame() ? * XDP core assumes that xdp frags are PAGE_SIZE in length, while in * virtio-net there are 2 points that do not match its requirements: * 1. The size of the prefilled buffer is not fixed before xdp is set. * 2. xdp_build_skb_from_frame() does more checks that we don't need, * like eth_type_trans() (which virtio-net does in receive_buf()). */ static struct sk_buff *build_skb_from_xdp_buff(struct net_device *dev, struct virtnet_info *vi, struct xdp_buff *xdp, unsigned int xdp_frags_truesz) { struct skb_shared_info *sinfo = xdp_get_shared_info_from_buff(xdp); unsigned int headroom, data_len; struct sk_buff *skb; int metasize; u8 nr_frags; if (unlikely(xdp->data_end > xdp_data_hard_end(xdp))) { pr_debug("Error building skb as missing reserved tailroom for xdp"); return NULL; } if (unlikely(xdp_buff_has_frags(xdp))) nr_frags = sinfo->nr_frags; skb = build_skb(xdp->data_hard_start, xdp->frame_sz); if (unlikely(!skb)) return NULL; headroom = xdp->data - xdp->data_hard_start; data_len = xdp->data_end - xdp->data; skb_reserve(skb, headroom); __skb_put(skb, data_len); metasize = xdp->data - xdp->data_meta; metasize = metasize > 0 ? 
metasize : 0; if (metasize) skb_metadata_set(skb, metasize); if (unlikely(xdp_buff_has_frags(xdp))) xdp_update_skb_shared_info(skb, nr_frags, sinfo->xdp_frags_size, xdp_frags_truesz, xdp_buff_is_frag_pfmemalloc(xdp)); return skb; } /* TODO: build xdp in big mode */ static int virtnet_build_xdp_buff_mrg(struct net_device *dev, struct virtnet_info *vi, struct receive_queue *rq, struct xdp_buff *xdp, void *buf, unsigned int len, unsigned int frame_sz, int *num_buf, unsigned int *xdp_frags_truesize, struct virtnet_rq_stats *stats) { struct virtio_net_hdr_mrg_rxbuf *hdr = buf; unsigned int headroom, tailroom, room; unsigned int truesize, cur_frag_size; struct skb_shared_info *shinfo; unsigned int xdp_frags_truesz = 0; struct page *page; skb_frag_t *frag; int offset; void *ctx; xdp_init_buff(xdp, frame_sz, &rq->xdp_rxq); xdp_prepare_buff(xdp, buf - VIRTIO_XDP_HEADROOM, VIRTIO_XDP_HEADROOM + vi->hdr_len, len - vi->hdr_len, true); if (!*num_buf) return 0; if (*num_buf > 1) { /* If we want to build multi-buffer xdp, we need * to specify that the flags of xdp_buff have the * XDP_FLAGS_HAS_FRAG bit. */ if (!xdp_buff_has_frags(xdp)) xdp_buff_set_frags_flag(xdp); shinfo = xdp_get_shared_info_from_buff(xdp); shinfo->nr_frags = 0; shinfo->xdp_frags_size = 0; } if (*num_buf > MAX_SKB_FRAGS + 1) return -EINVAL; while (--*num_buf > 0) { buf = virtnet_rq_get_buf(rq, &len, &ctx); if (unlikely(!buf)) { pr_debug("%s: rx error: %d buffers out of %d missing\n", dev->name, *num_buf, virtio16_to_cpu(vi->vdev, hdr->num_buffers)); dev->stats.rx_length_errors++; goto err; } stats->bytes += len; page = virt_to_head_page(buf); offset = buf - page_address(page); truesize = mergeable_ctx_to_truesize(ctx); headroom = mergeable_ctx_to_headroom(ctx); tailroom = headroom ? sizeof(struct skb_shared_info) : 0; room = SKB_DATA_ALIGN(headroom + tailroom); cur_frag_size = truesize; xdp_frags_truesz += cur_frag_size; if (unlikely(len > truesize - room || cur_frag_size > PAGE_SIZE)) { put_page(page); pr_debug("%s: rx error: len %u exceeds truesize %lu\n", dev->name, len, (unsigned long)(truesize - room)); dev->stats.rx_length_errors++; goto err; } frag = &shinfo->frags[shinfo->nr_frags++]; skb_frag_fill_page_desc(frag, page, offset, len); if (page_is_pfmemalloc(page)) xdp_buff_set_frag_pfmemalloc(xdp); shinfo->xdp_frags_size += len; } *xdp_frags_truesize = xdp_frags_truesz; return 0; err: put_xdp_frags(xdp); return -EINVAL; } static void *mergeable_xdp_get_buf(struct virtnet_info *vi, struct receive_queue *rq, struct bpf_prog *xdp_prog, void *ctx, unsigned int *frame_sz, int *num_buf, struct page **page, int offset, unsigned int *len, struct virtio_net_hdr_mrg_rxbuf *hdr) { unsigned int truesize = mergeable_ctx_to_truesize(ctx); unsigned int headroom = mergeable_ctx_to_headroom(ctx); struct page *xdp_page; unsigned int xdp_room; /* Transient failure which in theory could occur if * in-flight packets from before XDP was enabled reach * the receive path after XDP is loaded. */ if (unlikely(hdr->hdr.gso_type)) return NULL; /* Now XDP core assumes frag size is PAGE_SIZE, but buffers * with headroom may add hole in truesize, which * make their length exceed PAGE_SIZE. So we disabled the * hole mechanism for xdp. See add_recvbuf_mergeable(). */ *frame_sz = truesize; if (likely(headroom >= virtnet_get_headroom(vi) && (*num_buf == 1 || xdp_prog->aux->xdp_has_frags))) { return page_address(*page) + offset; } /* This happens when headroom is not enough because * of the buffer was prefilled before XDP is set. 
* This should only happen for the first several packets. * In fact, vq reset can be used here to help us clean up * the prefilled buffers, but many existing devices do not * support it, and we don't want to bother users who are * using xdp normally. */ if (!xdp_prog->aux->xdp_has_frags) { /* linearize data for XDP */ xdp_page = xdp_linearize_page(rq, num_buf, *page, offset, VIRTIO_XDP_HEADROOM, len); if (!xdp_page) return NULL; } else { xdp_room = SKB_DATA_ALIGN(VIRTIO_XDP_HEADROOM + sizeof(struct skb_shared_info)); if (*len + xdp_room > PAGE_SIZE) return NULL; xdp_page = alloc_page(GFP_ATOMIC); if (!xdp_page) return NULL; memcpy(page_address(xdp_page) + VIRTIO_XDP_HEADROOM, page_address(*page) + offset, *len); } *frame_sz = PAGE_SIZE; put_page(*page); *page = xdp_page; return page_address(*page) + VIRTIO_XDP_HEADROOM; } static struct sk_buff *receive_mergeable_xdp(struct net_device *dev, struct virtnet_info *vi, struct receive_queue *rq, struct bpf_prog *xdp_prog, void *buf, void *ctx, unsigned int len, unsigned int *xdp_xmit, struct virtnet_rq_stats *stats) { struct virtio_net_hdr_mrg_rxbuf *hdr = buf; int num_buf = virtio16_to_cpu(vi->vdev, hdr->num_buffers); struct page *page = virt_to_head_page(buf); int offset = buf - page_address(page); unsigned int xdp_frags_truesz = 0; struct sk_buff *head_skb; unsigned int frame_sz; struct xdp_buff xdp; void *data; u32 act; int err; data = mergeable_xdp_get_buf(vi, rq, xdp_prog, ctx, &frame_sz, &num_buf, &page, offset, &len, hdr); if (unlikely(!data)) goto err_xdp; err = virtnet_build_xdp_buff_mrg(dev, vi, rq, &xdp, data, len, frame_sz, &num_buf, &xdp_frags_truesz, stats); if (unlikely(err)) goto err_xdp; act = virtnet_xdp_handler(xdp_prog, &xdp, dev, xdp_xmit, stats); switch (act) { case XDP_PASS: head_skb = build_skb_from_xdp_buff(dev, vi, &xdp, xdp_frags_truesz); if (unlikely(!head_skb)) break; return head_skb; case XDP_TX: case XDP_REDIRECT: return NULL; default: break; } put_xdp_frags(&xdp); err_xdp: put_page(page); mergeable_buf_free(rq, num_buf, dev, stats); stats->xdp_drops++; stats->drops++; return NULL; } static struct sk_buff *receive_mergeable(struct net_device *dev, struct virtnet_info *vi, struct receive_queue *rq, void *buf, void *ctx, unsigned int len, unsigned int *xdp_xmit, struct virtnet_rq_stats *stats) { struct virtio_net_hdr_mrg_rxbuf *hdr = buf; int num_buf = virtio16_to_cpu(vi->vdev, hdr->num_buffers); struct page *page = virt_to_head_page(buf); int offset = buf - page_address(page); struct sk_buff *head_skb, *curr_skb; unsigned int truesize = mergeable_ctx_to_truesize(ctx); unsigned int headroom = mergeable_ctx_to_headroom(ctx); unsigned int tailroom = headroom ? 
sizeof(struct skb_shared_info) : 0; unsigned int room = SKB_DATA_ALIGN(headroom + tailroom); head_skb = NULL; stats->bytes += len - vi->hdr_len; if (unlikely(len > truesize - room)) { pr_debug("%s: rx error: len %u exceeds truesize %lu\n", dev->name, len, (unsigned long)(truesize - room)); dev->stats.rx_length_errors++; goto err_skb; } if (unlikely(vi->xdp_enabled)) { struct bpf_prog *xdp_prog; rcu_read_lock(); xdp_prog = rcu_dereference(rq->xdp_prog); if (xdp_prog) { head_skb = receive_mergeable_xdp(dev, vi, rq, xdp_prog, buf, ctx, len, xdp_xmit, stats); rcu_read_unlock(); return head_skb; } rcu_read_unlock(); } head_skb = page_to_skb(vi, rq, page, offset, len, truesize, headroom); curr_skb = head_skb; if (unlikely(!curr_skb)) goto err_skb; while (--num_buf) { int num_skb_frags; buf = virtnet_rq_get_buf(rq, &len, &ctx); if (unlikely(!buf)) { pr_debug("%s: rx error: %d buffers out of %d missing\n", dev->name, num_buf, virtio16_to_cpu(vi->vdev, hdr->num_buffers)); dev->stats.rx_length_errors++; goto err_buf; } stats->bytes += len; page = virt_to_head_page(buf); truesize = mergeable_ctx_to_truesize(ctx); headroom = mergeable_ctx_to_headroom(ctx); tailroom = headroom ? sizeof(struct skb_shared_info) : 0; room = SKB_DATA_ALIGN(headroom + tailroom); if (unlikely(len > truesize - room)) { pr_debug("%s: rx error: len %u exceeds truesize %lu\n", dev->name, len, (unsigned long)(truesize - room)); dev->stats.rx_length_errors++; goto err_skb; } num_skb_frags = skb_shinfo(curr_skb)->nr_frags; if (unlikely(num_skb_frags == MAX_SKB_FRAGS)) { struct sk_buff *nskb = alloc_skb(0, GFP_ATOMIC); if (unlikely(!nskb)) goto err_skb; if (curr_skb == head_skb) skb_shinfo(curr_skb)->frag_list = nskb; else curr_skb->next = nskb; curr_skb = nskb; head_skb->truesize += nskb->truesize; num_skb_frags = 0; } if (curr_skb != head_skb) { head_skb->data_len += len; head_skb->len += len; head_skb->truesize += truesize; } offset = buf - page_address(page); if (skb_can_coalesce(curr_skb, num_skb_frags, page, offset)) { put_page(page); skb_coalesce_rx_frag(curr_skb, num_skb_frags - 1, len, truesize); } else { skb_add_rx_frag(curr_skb, num_skb_frags, page, offset, len, truesize); } } ewma_pkt_len_add(&rq->mrg_avg_pkt_len, head_skb->len); return head_skb; err_skb: put_page(page); mergeable_buf_free(rq, num_buf, dev, stats); err_buf: stats->drops++; dev_kfree_skb(head_skb); return NULL; } static void virtio_skb_set_hash(const struct virtio_net_hdr_v1_hash *hdr_hash, struct sk_buff *skb) { enum pkt_hash_types rss_hash_type; if (!hdr_hash || !skb) return; switch (__le16_to_cpu(hdr_hash->hash_report)) { case VIRTIO_NET_HASH_REPORT_TCPv4: case VIRTIO_NET_HASH_REPORT_UDPv4: case VIRTIO_NET_HASH_REPORT_TCPv6: case VIRTIO_NET_HASH_REPORT_UDPv6: case VIRTIO_NET_HASH_REPORT_TCPv6_EX: case VIRTIO_NET_HASH_REPORT_UDPv6_EX: rss_hash_type = PKT_HASH_TYPE_L4; break; case VIRTIO_NET_HASH_REPORT_IPv4: case VIRTIO_NET_HASH_REPORT_IPv6: case VIRTIO_NET_HASH_REPORT_IPv6_EX: rss_hash_type = PKT_HASH_TYPE_L3; break; case VIRTIO_NET_HASH_REPORT_NONE: default: rss_hash_type = PKT_HASH_TYPE_NONE; } skb_set_hash(skb, __le32_to_cpu(hdr_hash->hash_value), rss_hash_type); } static void receive_buf(struct virtnet_info *vi, struct receive_queue *rq, void *buf, unsigned int len, void **ctx, unsigned int *xdp_xmit, struct virtnet_rq_stats *stats) { struct net_device *dev = vi->dev; struct sk_buff *skb; struct virtio_net_common_hdr *hdr; if (unlikely(len < vi->hdr_len + ETH_HLEN)) { pr_debug("%s: short packet %i\n", dev->name, len); dev->stats.rx_length_errors++; 
virtnet_rq_free_unused_buf(rq->vq, buf); return; } if (vi->mergeable_rx_bufs) skb = receive_mergeable(dev, vi, rq, buf, ctx, len, xdp_xmit, stats); else if (vi->big_packets) skb = receive_big(dev, vi, rq, buf, len, stats); else skb = receive_small(dev, vi, rq, buf, ctx, len, xdp_xmit, stats); if (unlikely(!skb)) return; hdr = skb_vnet_common_hdr(skb); if (dev->features & NETIF_F_RXHASH && vi->has_rss_hash_report) virtio_skb_set_hash(&hdr->hash_v1_hdr, skb); if (hdr->hdr.flags & VIRTIO_NET_HDR_F_DATA_VALID) skb->ip_summed = CHECKSUM_UNNECESSARY; if (virtio_net_hdr_to_skb(skb, &hdr->hdr, virtio_is_little_endian(vi->vdev))) { net_warn_ratelimited("%s: bad gso: type: %u, size: %u\n", dev->name, hdr->hdr.gso_type, hdr->hdr.gso_size); goto frame_err; } skb_record_rx_queue(skb, vq2rxq(rq->vq)); skb->protocol = eth_type_trans(skb, dev); pr_debug("Receiving skb proto 0x%04x len %i type %i\n", ntohs(skb->protocol), skb->len, skb->pkt_type); napi_gro_receive(&rq->napi, skb); return; frame_err: dev->stats.rx_frame_errors++; dev_kfree_skb(skb); } /* Unlike mergeable buffers, all buffers are allocated to the * same size, except for the headroom. For this reason we do * not need to use mergeable_len_to_ctx here - it is enough * to store the headroom as the context ignoring the truesize. */ static int add_recvbuf_small(struct virtnet_info *vi, struct receive_queue *rq, gfp_t gfp) { char *buf; unsigned int xdp_headroom = virtnet_get_headroom(vi); void *ctx = (void *)(unsigned long)xdp_headroom; int len = vi->hdr_len + VIRTNET_RX_PAD + GOOD_PACKET_LEN + xdp_headroom; int err; len = SKB_DATA_ALIGN(len) + SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); buf = virtnet_rq_alloc(rq, len, gfp); if (unlikely(!buf)) return -ENOMEM; virtnet_rq_init_one_sg(rq, buf + VIRTNET_RX_PAD + xdp_headroom, vi->hdr_len + GOOD_PACKET_LEN); err = virtqueue_add_inbuf_ctx(rq->vq, rq->sg, 1, buf, ctx, gfp); if (err < 0) { if (rq->do_dma) virtnet_rq_unmap(rq, buf, 0); put_page(virt_to_head_page(buf)); } return err; } static int add_recvbuf_big(struct virtnet_info *vi, struct receive_queue *rq, gfp_t gfp) { struct page *first, *list = NULL; char *p; int i, err, offset; sg_init_table(rq->sg, vi->big_packets_num_skbfrags + 2); /* page in rq->sg[vi->big_packets_num_skbfrags + 1] is list tail */ for (i = vi->big_packets_num_skbfrags + 1; i > 1; --i) { first = get_a_page(rq, gfp); if (!first) { if (list) give_pages(rq, list); return -ENOMEM; } sg_set_buf(&rq->sg[i], page_address(first), PAGE_SIZE); /* chain new page in list head to match sg */ first->private = (unsigned long)list; list = first; } first = get_a_page(rq, gfp); if (!first) { give_pages(rq, list); return -ENOMEM; } p = page_address(first); /* rq->sg[0], rq->sg[1] share the same page */ /* a separated rq->sg[0] for header - required in case !any_header_sg */ sg_set_buf(&rq->sg[0], p, vi->hdr_len); /* rq->sg[1] for data packet, from offset */ offset = sizeof(struct padded_vnet_hdr); sg_set_buf(&rq->sg[1], p + offset, PAGE_SIZE - offset); /* chain first in list head */ first->private = (unsigned long)list; err = virtqueue_add_inbuf(rq->vq, rq->sg, vi->big_packets_num_skbfrags + 2, first, gfp); if (err < 0) give_pages(rq, first); return err; } static unsigned int get_mergeable_buf_len(struct receive_queue *rq, struct ewma_pkt_len *avg_pkt_len, unsigned int room) { struct virtnet_info *vi = rq->vq->vdev->priv; const size_t hdr_len = vi->hdr_len; unsigned int len; if (room) return PAGE_SIZE - room; len = hdr_len + clamp_t(unsigned int, ewma_pkt_len_read(avg_pkt_len), rq->min_buf_len, 
PAGE_SIZE - hdr_len); return ALIGN(len, L1_CACHE_BYTES); } static int add_recvbuf_mergeable(struct virtnet_info *vi, struct receive_queue *rq, gfp_t gfp) { struct page_frag *alloc_frag = &rq->alloc_frag; unsigned int headroom = virtnet_get_headroom(vi); unsigned int tailroom = headroom ? sizeof(struct skb_shared_info) : 0; unsigned int room = SKB_DATA_ALIGN(headroom + tailroom); unsigned int len, hole; void *ctx; char *buf; int err; /* Extra tailroom is needed to satisfy XDP's assumption. This * means rx frags coalescing won't work, but consider we've * disabled GSO for XDP, it won't be a big issue. */ len = get_mergeable_buf_len(rq, &rq->mrg_avg_pkt_len, room); buf = virtnet_rq_alloc(rq, len + room, gfp); if (unlikely(!buf)) return -ENOMEM; buf += headroom; /* advance address leaving hole at front of pkt */ hole = alloc_frag->size - alloc_frag->offset; if (hole < len + room) { /* To avoid internal fragmentation, if there is very likely not * enough space for another buffer, add the remaining space to * the current buffer. * XDP core assumes that frame_size of xdp_buff and the length * of the frag are PAGE_SIZE, so we disable the hole mechanism. */ if (!headroom) len += hole; alloc_frag->offset += hole; } virtnet_rq_init_one_sg(rq, buf, len); ctx = mergeable_len_to_ctx(len + room, headroom); err = virtqueue_add_inbuf_ctx(rq->vq, rq->sg, 1, buf, ctx, gfp); if (err < 0) { if (rq->do_dma) virtnet_rq_unmap(rq, buf, 0); put_page(virt_to_head_page(buf)); } return err; } /* * Returns false if we couldn't fill entirely (OOM). * * Normally run in the receive path, but can also be run from ndo_open * before we're receiving packets, or from refill_work which is * careful to disable receiving (using napi_disable). */ static bool try_fill_recv(struct virtnet_info *vi, struct receive_queue *rq, gfp_t gfp) { int err; bool oom; do { if (vi->mergeable_rx_bufs) err = add_recvbuf_mergeable(vi, rq, gfp); else if (vi->big_packets) err = add_recvbuf_big(vi, rq, gfp); else err = add_recvbuf_small(vi, rq, gfp); oom = err == -ENOMEM; if (err) break; } while (rq->vq->num_free); if (virtqueue_kick_prepare(rq->vq) && virtqueue_notify(rq->vq)) { unsigned long flags; flags = u64_stats_update_begin_irqsave(&rq->stats.syncp); rq->stats.kicks++; u64_stats_update_end_irqrestore(&rq->stats.syncp, flags); } return !oom; } static void skb_recv_done(struct virtqueue *rvq) { struct virtnet_info *vi = rvq->vdev->priv; struct receive_queue *rq = &vi->rq[vq2rxq(rvq)]; virtqueue_napi_schedule(&rq->napi, rvq); } static void virtnet_napi_enable(struct virtqueue *vq, struct napi_struct *napi) { napi_enable(napi); /* If all buffers were filled by other side before we napi_enabled, we * won't get another interrupt, so process any outstanding packets now. * Call local_bh_enable after to trigger softIRQ processing. */ local_bh_disable(); virtqueue_napi_schedule(napi, vq); local_bh_enable(); } static void virtnet_napi_tx_enable(struct virtnet_info *vi, struct virtqueue *vq, struct napi_struct *napi) { if (!napi->weight) return; /* Tx napi touches cachelines on the cpu handling tx interrupts. Only * enable the feature if this is likely affine with the transmit path. 
*/ if (!vi->affinity_hint_set) { napi->weight = 0; return; } return virtnet_napi_enable(vq, napi); } static void virtnet_napi_tx_disable(struct napi_struct *napi) { if (napi->weight) napi_disable(napi); } static void refill_work(struct work_struct *work) { struct virtnet_info *vi = container_of(work, struct virtnet_info, refill.work); bool still_empty; int i; for (i = 0; i < vi->curr_queue_pairs; i++) { struct receive_queue *rq = &vi->rq[i]; napi_disable(&rq->napi); still_empty = !try_fill_recv(vi, rq, GFP_KERNEL); virtnet_napi_enable(rq->vq, &rq->napi); /* In theory, this can happen: if we don't get any buffers in * we will *never* try to fill again. */ if (still_empty) schedule_delayed_work(&vi->refill, HZ/2); } } static int virtnet_receive(struct receive_queue *rq, int budget, unsigned int *xdp_xmit) { struct virtnet_info *vi = rq->vq->vdev->priv; struct virtnet_rq_stats stats = {}; unsigned int len; void *buf; int i; if (!vi->big_packets || vi->mergeable_rx_bufs) { void *ctx; while (stats.packets < budget && (buf = virtnet_rq_get_buf(rq, &len, &ctx))) { receive_buf(vi, rq, buf, len, ctx, xdp_xmit, &stats); stats.packets++; } } else { while (stats.packets < budget && (buf = virtnet_rq_get_buf(rq, &len, NULL)) != NULL) { receive_buf(vi, rq, buf, len, NULL, xdp_xmit, &stats); stats.packets++; } } if (rq->vq->num_free > min((unsigned int)budget, virtqueue_get_vring_size(rq->vq)) / 2) { if (!try_fill_recv(vi, rq, GFP_ATOMIC)) { spin_lock(&vi->refill_lock); if (vi->refill_enabled) schedule_delayed_work(&vi->refill, 0); spin_unlock(&vi->refill_lock); } } u64_stats_update_begin(&rq->stats.syncp); for (i = 0; i < VIRTNET_RQ_STATS_LEN; i++) { size_t offset = virtnet_rq_stats_desc[i].offset; u64 *item; item = (u64 *)((u8 *)&rq->stats + offset); *item += *(u64 *)((u8 *)&stats + offset); } u64_stats_update_end(&rq->stats.syncp); return stats.packets; } static void virtnet_poll_cleantx(struct receive_queue *rq) { struct virtnet_info *vi = rq->vq->vdev->priv; unsigned int index = vq2rxq(rq->vq); struct send_queue *sq = &vi->sq[index]; struct netdev_queue *txq = netdev_get_tx_queue(vi->dev, index); if (!sq->napi.weight || is_xdp_raw_buffer_queue(vi, index)) return; if (__netif_tx_trylock(txq)) { if (sq->reset) { __netif_tx_unlock(txq); return; } do { virtqueue_disable_cb(sq->vq); free_old_xmit_skbs(sq, true); } while (unlikely(!virtqueue_enable_cb_delayed(sq->vq))); if (sq->vq->num_free >= 2 + MAX_SKB_FRAGS) netif_tx_wake_queue(txq); __netif_tx_unlock(txq); } } static int virtnet_poll(struct napi_struct *napi, int budget) { struct receive_queue *rq = container_of(napi, struct receive_queue, napi); struct virtnet_info *vi = rq->vq->vdev->priv; struct send_queue *sq; unsigned int received; unsigned int xdp_xmit = 0; virtnet_poll_cleantx(rq); received = virtnet_receive(rq, budget, &xdp_xmit); if (xdp_xmit & VIRTIO_XDP_REDIR) xdp_do_flush(); /* Out of packets? 
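 * If the full budget was not consumed, complete napi below so the receive
 * virtqueue callback can be re-enabled; otherwise stay in polling mode and
 * let the core reschedule us.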
*/ if (received < budget) virtqueue_napi_complete(napi, rq->vq, received); if (xdp_xmit & VIRTIO_XDP_TX) { sq = virtnet_xdp_get_sq(vi); if (virtqueue_kick_prepare(sq->vq) && virtqueue_notify(sq->vq)) { u64_stats_update_begin(&sq->stats.syncp); sq->stats.kicks++; u64_stats_update_end(&sq->stats.syncp); } virtnet_xdp_put_sq(vi, sq); } return received; } static void virtnet_disable_queue_pair(struct virtnet_info *vi, int qp_index) { virtnet_napi_tx_disable(&vi->sq[qp_index].napi); napi_disable(&vi->rq[qp_index].napi); xdp_rxq_info_unreg(&vi->rq[qp_index].xdp_rxq); } static int virtnet_enable_queue_pair(struct virtnet_info *vi, int qp_index) { struct net_device *dev = vi->dev; int err; err = xdp_rxq_info_reg(&vi->rq[qp_index].xdp_rxq, dev, qp_index, vi->rq[qp_index].napi.napi_id); if (err < 0) return err; err = xdp_rxq_info_reg_mem_model(&vi->rq[qp_index].xdp_rxq, MEM_TYPE_PAGE_SHARED, NULL); if (err < 0) goto err_xdp_reg_mem_model; virtnet_napi_enable(vi->rq[qp_index].vq, &vi->rq[qp_index].napi); virtnet_napi_tx_enable(vi, vi->sq[qp_index].vq, &vi->sq[qp_index].napi); return 0; err_xdp_reg_mem_model: xdp_rxq_info_unreg(&vi->rq[qp_index].xdp_rxq); return err; } static int virtnet_open(struct net_device *dev) { struct virtnet_info *vi = netdev_priv(dev); int i, err; enable_delayed_refill(vi); for (i = 0; i < vi->max_queue_pairs; i++) { if (i < vi->curr_queue_pairs) /* Make sure we have some buffers: if oom use wq. */ if (!try_fill_recv(vi, &vi->rq[i], GFP_KERNEL)) schedule_delayed_work(&vi->refill, 0); err = virtnet_enable_queue_pair(vi, i); if (err < 0) goto err_enable_qp; } return 0; err_enable_qp: disable_delayed_refill(vi); cancel_delayed_work_sync(&vi->refill); for (i--; i >= 0; i--) virtnet_disable_queue_pair(vi, i); return err; } static int virtnet_poll_tx(struct napi_struct *napi, int budget) { struct send_queue *sq = container_of(napi, struct send_queue, napi); struct virtnet_info *vi = sq->vq->vdev->priv; unsigned int index = vq2txq(sq->vq); struct netdev_queue *txq; int opaque; bool done; if (unlikely(is_xdp_raw_buffer_queue(vi, index))) { /* We don't need to enable cb for XDP */ napi_complete_done(napi, 0); return 0; } txq = netdev_get_tx_queue(vi->dev, index); __netif_tx_lock(txq, raw_smp_processor_id()); virtqueue_disable_cb(sq->vq); free_old_xmit_skbs(sq, true); if (sq->vq->num_free >= 2 + MAX_SKB_FRAGS) netif_tx_wake_queue(txq); opaque = virtqueue_enable_cb_prepare(sq->vq); done = napi_complete_done(napi, 0); if (!done) virtqueue_disable_cb(sq->vq); __netif_tx_unlock(txq); if (done) { if (unlikely(virtqueue_poll(sq->vq, opaque))) { if (napi_schedule_prep(napi)) { __netif_tx_lock(txq, raw_smp_processor_id()); virtqueue_disable_cb(sq->vq); __netif_tx_unlock(txq); __napi_schedule(napi); } } } return 0; } static int xmit_skb(struct send_queue *sq, struct sk_buff *skb) { struct virtio_net_hdr_mrg_rxbuf *hdr; const unsigned char *dest = ((struct ethhdr *)skb->data)->h_dest; struct virtnet_info *vi = sq->vq->vdev->priv; int num_sg; unsigned hdr_len = vi->hdr_len; bool can_push; pr_debug("%s: xmit %p %pM\n", vi->dev->name, skb, dest); can_push = vi->any_header_sg && !((unsigned long)skb->data & (__alignof__(*hdr) - 1)) && !skb_header_cloned(skb) && skb_headroom(skb) >= hdr_len; /* Even if we can, don't push here yet as this would skew * csum_start offset below. 
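 * (virtio_net_hdr_from_skb() records csum_start relative to skb->data, so
 * pushing the header first would shift that offset by hdr_len; the push
 * therefore happens only after the header has been filled in.)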
*/ if (can_push) hdr = (struct virtio_net_hdr_mrg_rxbuf *)(skb->data - hdr_len); else hdr = &skb_vnet_common_hdr(skb)->mrg_hdr; if (virtio_net_hdr_from_skb(skb, &hdr->hdr, virtio_is_little_endian(vi->vdev), false, 0)) return -EPROTO; if (vi->mergeable_rx_bufs) hdr->num_buffers = 0; sg_init_table(sq->sg, skb_shinfo(skb)->nr_frags + (can_push ? 1 : 2)); if (can_push) { __skb_push(skb, hdr_len); num_sg = skb_to_sgvec(skb, sq->sg, 0, skb->len); if (unlikely(num_sg < 0)) return num_sg; /* Pull header back to avoid skew in tx bytes calculations. */ __skb_pull(skb, hdr_len); } else { sg_set_buf(sq->sg, hdr, hdr_len); num_sg = skb_to_sgvec(skb, sq->sg + 1, 0, skb->len); if (unlikely(num_sg < 0)) return num_sg; num_sg++; } return virtqueue_add_outbuf(sq->vq, sq->sg, num_sg, skb, GFP_ATOMIC); } static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev) { struct virtnet_info *vi = netdev_priv(dev); int qnum = skb_get_queue_mapping(skb); struct send_queue *sq = &vi->sq[qnum]; int err; struct netdev_queue *txq = netdev_get_tx_queue(dev, qnum); bool kick = !netdev_xmit_more(); bool use_napi = sq->napi.weight; /* Free up any pending old buffers before queueing new ones. */ do { if (use_napi) virtqueue_disable_cb(sq->vq); free_old_xmit_skbs(sq, false); } while (use_napi && kick && unlikely(!virtqueue_enable_cb_delayed(sq->vq))); /* timestamp packet in software */ skb_tx_timestamp(skb); /* Try to transmit */ err = xmit_skb(sq, skb); /* This should not happen! */ if (unlikely(err)) { dev->stats.tx_fifo_errors++; if (net_ratelimit()) dev_warn(&dev->dev, "Unexpected TXQ (%d) queue failure: %d\n", qnum, err); dev->stats.tx_dropped++; dev_kfree_skb_any(skb); return NETDEV_TX_OK; } /* Don't wait up for transmitted skbs to be freed. */ if (!use_napi) { skb_orphan(skb); nf_reset_ct(skb); } check_sq_full_and_disable(vi, dev, sq); if (kick || netif_xmit_stopped(txq)) { if (virtqueue_kick_prepare(sq->vq) && virtqueue_notify(sq->vq)) { u64_stats_update_begin(&sq->stats.syncp); sq->stats.kicks++; u64_stats_update_end(&sq->stats.syncp); } } return NETDEV_TX_OK; } static int virtnet_rx_resize(struct virtnet_info *vi, struct receive_queue *rq, u32 ring_num) { bool running = netif_running(vi->dev); int err, qindex; qindex = rq - vi->rq; if (running) napi_disable(&rq->napi); err = virtqueue_resize(rq->vq, ring_num, virtnet_rq_free_unused_buf); if (err) netdev_err(vi->dev, "resize rx fail: rx queue index: %d err: %d\n", qindex, err); if (!try_fill_recv(vi, rq, GFP_KERNEL)) schedule_delayed_work(&vi->refill, 0); if (running) virtnet_napi_enable(rq->vq, &rq->napi); return err; } static int virtnet_tx_resize(struct virtnet_info *vi, struct send_queue *sq, u32 ring_num) { bool running = netif_running(vi->dev); struct netdev_queue *txq; int err, qindex; qindex = sq - vi->sq; if (running) virtnet_napi_tx_disable(&sq->napi); txq = netdev_get_tx_queue(vi->dev, qindex); /* 1. wait all ximt complete * 2. fix the race of netif_stop_subqueue() vs netif_start_subqueue() */ __netif_tx_lock_bh(txq); /* Prevent rx poll from accessing sq. */ sq->reset = true; /* Prevent the upper layer from trying to send packets. 
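 * The subqueue is woken again further below with netif_tx_wake_queue()
 * once the ring has been resized and sq->reset has been cleared.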
*/ netif_stop_subqueue(vi->dev, qindex); __netif_tx_unlock_bh(txq); err = virtqueue_resize(sq->vq, ring_num, virtnet_sq_free_unused_buf); if (err) netdev_err(vi->dev, "resize tx fail: tx queue index: %d err: %d\n", qindex, err); __netif_tx_lock_bh(txq); sq->reset = false; netif_tx_wake_queue(txq); __netif_tx_unlock_bh(txq); if (running) virtnet_napi_tx_enable(vi, sq->vq, &sq->napi); return err; } /* * Send command via the control virtqueue and check status. Commands * supported by the hypervisor, as indicated by feature bits, should * never fail unless improperly formatted. */ static bool virtnet_send_command(struct virtnet_info *vi, u8 class, u8 cmd, struct scatterlist *out) { struct scatterlist *sgs[4], hdr, stat; unsigned out_num = 0, tmp; int ret; /* Caller should know better */ BUG_ON(!virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VQ)); vi->ctrl->status = ~0; vi->ctrl->hdr.class = class; vi->ctrl->hdr.cmd = cmd; /* Add header */ sg_init_one(&hdr, &vi->ctrl->hdr, sizeof(vi->ctrl->hdr)); sgs[out_num++] = &hdr; if (out) sgs[out_num++] = out; /* Add return status. */ sg_init_one(&stat, &vi->ctrl->status, sizeof(vi->ctrl->status)); sgs[out_num] = &stat; BUG_ON(out_num + 1 > ARRAY_SIZE(sgs)); ret = virtqueue_add_sgs(vi->cvq, sgs, out_num, 1, vi, GFP_ATOMIC); if (ret < 0) { dev_warn(&vi->vdev->dev, "Failed to add sgs for command vq: %d\n.", ret); return false; } if (unlikely(!virtqueue_kick(vi->cvq))) return vi->ctrl->status == VIRTIO_NET_OK; /* Spin for a response, the kick causes an ioport write, trapping * into the hypervisor, so the request should be handled immediately. */ while (!virtqueue_get_buf(vi->cvq, &tmp) && !virtqueue_is_broken(vi->cvq)) cpu_relax(); return vi->ctrl->status == VIRTIO_NET_OK; } static int virtnet_set_mac_address(struct net_device *dev, void *p) { struct virtnet_info *vi = netdev_priv(dev); struct virtio_device *vdev = vi->vdev; int ret; struct sockaddr *addr; struct scatterlist sg; if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_STANDBY)) return -EOPNOTSUPP; addr = kmemdup(p, sizeof(*addr), GFP_KERNEL); if (!addr) return -ENOMEM; ret = eth_prepare_mac_addr_change(dev, addr); if (ret) goto out; if (virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_MAC_ADDR)) { sg_init_one(&sg, addr->sa_data, dev->addr_len); if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MAC, VIRTIO_NET_CTRL_MAC_ADDR_SET, &sg)) { dev_warn(&vdev->dev, "Failed to set mac address by vq command.\n"); ret = -EINVAL; goto out; } } else if (virtio_has_feature(vdev, VIRTIO_NET_F_MAC) && !virtio_has_feature(vdev, VIRTIO_F_VERSION_1)) { unsigned int i; /* Naturally, this has an atomicity problem. 
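 * Without VIRTIO_NET_F_CTRL_MAC_ADDR the address can only be written
 * through config space one byte at a time, so a concurrent config read
 * may observe a partially updated MAC.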
*/ for (i = 0; i < dev->addr_len; i++) virtio_cwrite8(vdev, offsetof(struct virtio_net_config, mac) + i, addr->sa_data[i]); } eth_commit_mac_addr_change(dev, p); ret = 0; out: kfree(addr); return ret; } static void virtnet_stats(struct net_device *dev, struct rtnl_link_stats64 *tot) { struct virtnet_info *vi = netdev_priv(dev); unsigned int start; int i; for (i = 0; i < vi->max_queue_pairs; i++) { u64 tpackets, tbytes, terrors, rpackets, rbytes, rdrops; struct receive_queue *rq = &vi->rq[i]; struct send_queue *sq = &vi->sq[i]; do { start = u64_stats_fetch_begin(&sq->stats.syncp); tpackets = sq->stats.packets; tbytes = sq->stats.bytes; terrors = sq->stats.tx_timeouts; } while (u64_stats_fetch_retry(&sq->stats.syncp, start)); do { start = u64_stats_fetch_begin(&rq->stats.syncp); rpackets = rq->stats.packets; rbytes = rq->stats.bytes; rdrops = rq->stats.drops; } while (u64_stats_fetch_retry(&rq->stats.syncp, start)); tot->rx_packets += rpackets; tot->tx_packets += tpackets; tot->rx_bytes += rbytes; tot->tx_bytes += tbytes; tot->rx_dropped += rdrops; tot->tx_errors += terrors; } tot->tx_dropped = dev->stats.tx_dropped; tot->tx_fifo_errors = dev->stats.tx_fifo_errors; tot->rx_length_errors = dev->stats.rx_length_errors; tot->rx_frame_errors = dev->stats.rx_frame_errors; } static void virtnet_ack_link_announce(struct virtnet_info *vi) { rtnl_lock(); if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_ANNOUNCE, VIRTIO_NET_CTRL_ANNOUNCE_ACK, NULL)) dev_warn(&vi->dev->dev, "Failed to ack link announce.\n"); rtnl_unlock(); } static int _virtnet_set_queues(struct virtnet_info *vi, u16 queue_pairs) { struct scatterlist sg; struct net_device *dev = vi->dev; if (!vi->has_cvq || !virtio_has_feature(vi->vdev, VIRTIO_NET_F_MQ)) return 0; vi->ctrl->mq.virtqueue_pairs = cpu_to_virtio16(vi->vdev, queue_pairs); sg_init_one(&sg, &vi->ctrl->mq, sizeof(vi->ctrl->mq)); if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MQ, VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET, &sg)) { dev_warn(&dev->dev, "Fail to set num of queue pairs to %d\n", queue_pairs); return -EINVAL; } else { vi->curr_queue_pairs = queue_pairs; /* virtnet_open() will refill when device is going to up. */ if (dev->flags & IFF_UP) schedule_delayed_work(&vi->refill, 0); } return 0; } static int virtnet_set_queues(struct virtnet_info *vi, u16 queue_pairs) { int err; rtnl_lock(); err = _virtnet_set_queues(vi, queue_pairs); rtnl_unlock(); return err; } static int virtnet_close(struct net_device *dev) { struct virtnet_info *vi = netdev_priv(dev); int i; /* Make sure NAPI doesn't schedule refill work */ disable_delayed_refill(vi); /* Make sure refill_work doesn't re-enable napi! */ cancel_delayed_work_sync(&vi->refill); for (i = 0; i < vi->max_queue_pairs; i++) virtnet_disable_queue_pair(vi, i); return 0; } static void virtnet_set_rx_mode(struct net_device *dev) { struct virtnet_info *vi = netdev_priv(dev); struct scatterlist sg[2]; struct virtio_net_ctrl_mac *mac_data; struct netdev_hw_addr *ha; int uc_count; int mc_count; void *buf; int i; /* We can't dynamically set ndo_set_rx_mode, so return gracefully */ if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_RX)) return; vi->ctrl->promisc = ((dev->flags & IFF_PROMISC) != 0); vi->ctrl->allmulti = ((dev->flags & IFF_ALLMULTI) != 0); sg_init_one(sg, &vi->ctrl->promisc, sizeof(vi->ctrl->promisc)); if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_RX, VIRTIO_NET_CTRL_RX_PROMISC, sg)) dev_warn(&dev->dev, "Failed to %sable promisc mode.\n", vi->ctrl->promisc ? 
"en" : "dis"); sg_init_one(sg, &vi->ctrl->allmulti, sizeof(vi->ctrl->allmulti)); if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_RX, VIRTIO_NET_CTRL_RX_ALLMULTI, sg)) dev_warn(&dev->dev, "Failed to %sable allmulti mode.\n", vi->ctrl->allmulti ? "en" : "dis"); uc_count = netdev_uc_count(dev); mc_count = netdev_mc_count(dev); /* MAC filter - use one buffer for both lists */ buf = kzalloc(((uc_count + mc_count) * ETH_ALEN) + (2 * sizeof(mac_data->entries)), GFP_ATOMIC); mac_data = buf; if (!buf) return; sg_init_table(sg, 2); /* Store the unicast list and count in the front of the buffer */ mac_data->entries = cpu_to_virtio32(vi->vdev, uc_count); i = 0; netdev_for_each_uc_addr(ha, dev) memcpy(&mac_data->macs[i++][0], ha->addr, ETH_ALEN); sg_set_buf(&sg[0], mac_data, sizeof(mac_data->entries) + (uc_count * ETH_ALEN)); /* multicast list and count fill the end */ mac_data = (void *)&mac_data->macs[uc_count][0]; mac_data->entries = cpu_to_virtio32(vi->vdev, mc_count); i = 0; netdev_for_each_mc_addr(ha, dev) memcpy(&mac_data->macs[i++][0], ha->addr, ETH_ALEN); sg_set_buf(&sg[1], mac_data, sizeof(mac_data->entries) + (mc_count * ETH_ALEN)); if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MAC, VIRTIO_NET_CTRL_MAC_TABLE_SET, sg)) dev_warn(&dev->dev, "Failed to set MAC filter table.\n"); kfree(buf); } static int virtnet_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid) { struct virtnet_info *vi = netdev_priv(dev); struct scatterlist sg; vi->ctrl->vid = cpu_to_virtio16(vi->vdev, vid); sg_init_one(&sg, &vi->ctrl->vid, sizeof(vi->ctrl->vid)); if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_VLAN, VIRTIO_NET_CTRL_VLAN_ADD, &sg)) dev_warn(&dev->dev, "Failed to add VLAN ID %d.\n", vid); return 0; } static int virtnet_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, u16 vid) { struct virtnet_info *vi = netdev_priv(dev); struct scatterlist sg; vi->ctrl->vid = cpu_to_virtio16(vi->vdev, vid); sg_init_one(&sg, &vi->ctrl->vid, sizeof(vi->ctrl->vid)); if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_VLAN, VIRTIO_NET_CTRL_VLAN_DEL, &sg)) dev_warn(&dev->dev, "Failed to kill VLAN ID %d.\n", vid); return 0; } static void virtnet_clean_affinity(struct virtnet_info *vi) { int i; if (vi->affinity_hint_set) { for (i = 0; i < vi->max_queue_pairs; i++) { virtqueue_set_affinity(vi->rq[i].vq, NULL); virtqueue_set_affinity(vi->sq[i].vq, NULL); } vi->affinity_hint_set = false; } } static void virtnet_set_affinity(struct virtnet_info *vi) { cpumask_var_t mask; int stragglers; int group_size; int i, j, cpu; int num_cpu; int stride; if (!zalloc_cpumask_var(&mask, GFP_KERNEL)) { virtnet_clean_affinity(vi); return; } num_cpu = num_online_cpus(); stride = max_t(int, num_cpu / vi->curr_queue_pairs, 1); stragglers = num_cpu >= vi->curr_queue_pairs ? num_cpu % vi->curr_queue_pairs : 0; cpu = cpumask_first(cpu_online_mask); for (i = 0; i < vi->curr_queue_pairs; i++) { group_size = stride + (i < stragglers ? 
1 : 0); for (j = 0; j < group_size; j++) { cpumask_set_cpu(cpu, mask); cpu = cpumask_next_wrap(cpu, cpu_online_mask, nr_cpu_ids, false); } virtqueue_set_affinity(vi->rq[i].vq, mask); virtqueue_set_affinity(vi->sq[i].vq, mask); __netif_set_xps_queue(vi->dev, cpumask_bits(mask), i, XPS_CPUS); cpumask_clear(mask); } vi->affinity_hint_set = true; free_cpumask_var(mask); } static int virtnet_cpu_online(unsigned int cpu, struct hlist_node *node) { struct virtnet_info *vi = hlist_entry_safe(node, struct virtnet_info, node); virtnet_set_affinity(vi); return 0; } static int virtnet_cpu_dead(unsigned int cpu, struct hlist_node *node) { struct virtnet_info *vi = hlist_entry_safe(node, struct virtnet_info, node_dead); virtnet_set_affinity(vi); return 0; } static int virtnet_cpu_down_prep(unsigned int cpu, struct hlist_node *node) { struct virtnet_info *vi = hlist_entry_safe(node, struct virtnet_info, node); virtnet_clean_affinity(vi); return 0; } static enum cpuhp_state virtionet_online; static int virtnet_cpu_notif_add(struct virtnet_info *vi) { int ret; ret = cpuhp_state_add_instance_nocalls(virtionet_online, &vi->node); if (ret) return ret; ret = cpuhp_state_add_instance_nocalls(CPUHP_VIRT_NET_DEAD, &vi->node_dead); if (!ret) return ret; cpuhp_state_remove_instance_nocalls(virtionet_online, &vi->node); return ret; } static void virtnet_cpu_notif_remove(struct virtnet_info *vi) { cpuhp_state_remove_instance_nocalls(virtionet_online, &vi->node); cpuhp_state_remove_instance_nocalls(CPUHP_VIRT_NET_DEAD, &vi->node_dead); } static void virtnet_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ring, struct kernel_ethtool_ringparam *kernel_ring, struct netlink_ext_ack *extack) { struct virtnet_info *vi = netdev_priv(dev); ring->rx_max_pending = vi->rq[0].vq->num_max; ring->tx_max_pending = vi->sq[0].vq->num_max; ring->rx_pending = virtqueue_get_vring_size(vi->rq[0].vq); ring->tx_pending = virtqueue_get_vring_size(vi->sq[0].vq); } static int virtnet_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ring, struct kernel_ethtool_ringparam *kernel_ring, struct netlink_ext_ack *extack) { struct virtnet_info *vi = netdev_priv(dev); u32 rx_pending, tx_pending; struct receive_queue *rq; struct send_queue *sq; int i, err; if (ring->rx_mini_pending || ring->rx_jumbo_pending) return -EINVAL; rx_pending = virtqueue_get_vring_size(vi->rq[0].vq); tx_pending = virtqueue_get_vring_size(vi->sq[0].vq); if (ring->rx_pending == rx_pending && ring->tx_pending == tx_pending) return 0; if (ring->rx_pending > vi->rq[0].vq->num_max) return -EINVAL; if (ring->tx_pending > vi->sq[0].vq->num_max) return -EINVAL; for (i = 0; i < vi->max_queue_pairs; i++) { rq = vi->rq + i; sq = vi->sq + i; if (ring->tx_pending != tx_pending) { err = virtnet_tx_resize(vi, sq, ring->tx_pending); if (err) return err; } if (ring->rx_pending != rx_pending) { err = virtnet_rx_resize(vi, rq, ring->rx_pending); if (err) return err; } } return 0; } static bool virtnet_commit_rss_command(struct virtnet_info *vi) { struct net_device *dev = vi->dev; struct scatterlist sgs[4]; unsigned int sg_buf_size; /* prepare sgs */ sg_init_table(sgs, 4); sg_buf_size = offsetof(struct virtio_net_ctrl_rss, indirection_table); sg_set_buf(&sgs[0], &vi->ctrl->rss, sg_buf_size); sg_buf_size = sizeof(uint16_t) * (vi->ctrl->rss.indirection_table_mask + 1); sg_set_buf(&sgs[1], vi->ctrl->rss.indirection_table, sg_buf_size); sg_buf_size = offsetof(struct virtio_net_ctrl_rss, key) - offsetof(struct virtio_net_ctrl_rss, max_tx_vq); sg_set_buf(&sgs[2], 
&vi->ctrl->rss.max_tx_vq, sg_buf_size); sg_buf_size = vi->rss_key_size; sg_set_buf(&sgs[3], vi->ctrl->rss.key, sg_buf_size); if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MQ, vi->has_rss ? VIRTIO_NET_CTRL_MQ_RSS_CONFIG : VIRTIO_NET_CTRL_MQ_HASH_CONFIG, sgs)) { dev_warn(&dev->dev, "VIRTIONET issue with committing RSS sgs\n"); return false; } return true; } static void virtnet_init_default_rss(struct virtnet_info *vi) { u32 indir_val = 0; int i = 0; vi->ctrl->rss.hash_types = vi->rss_hash_types_supported; vi->rss_hash_types_saved = vi->rss_hash_types_supported; vi->ctrl->rss.indirection_table_mask = vi->rss_indir_table_size ? vi->rss_indir_table_size - 1 : 0; vi->ctrl->rss.unclassified_queue = 0; for (; i < vi->rss_indir_table_size; ++i) { indir_val = ethtool_rxfh_indir_default(i, vi->curr_queue_pairs); vi->ctrl->rss.indirection_table[i] = indir_val; } vi->ctrl->rss.max_tx_vq = vi->has_rss ? vi->curr_queue_pairs : 0; vi->ctrl->rss.hash_key_length = vi->rss_key_size; netdev_rss_key_fill(vi->ctrl->rss.key, vi->rss_key_size); } static void virtnet_get_hashflow(const struct virtnet_info *vi, struct ethtool_rxnfc *info) { info->data = 0; switch (info->flow_type) { case TCP_V4_FLOW: if (vi->rss_hash_types_saved & VIRTIO_NET_RSS_HASH_TYPE_TCPv4) { info->data = RXH_IP_SRC | RXH_IP_DST | RXH_L4_B_0_1 | RXH_L4_B_2_3; } else if (vi->rss_hash_types_saved & VIRTIO_NET_RSS_HASH_TYPE_IPv4) { info->data = RXH_IP_SRC | RXH_IP_DST; } break; case TCP_V6_FLOW: if (vi->rss_hash_types_saved & VIRTIO_NET_RSS_HASH_TYPE_TCPv6) { info->data = RXH_IP_SRC | RXH_IP_DST | RXH_L4_B_0_1 | RXH_L4_B_2_3; } else if (vi->rss_hash_types_saved & VIRTIO_NET_RSS_HASH_TYPE_IPv6) { info->data = RXH_IP_SRC | RXH_IP_DST; } break; case UDP_V4_FLOW: if (vi->rss_hash_types_saved & VIRTIO_NET_RSS_HASH_TYPE_UDPv4) { info->data = RXH_IP_SRC | RXH_IP_DST | RXH_L4_B_0_1 | RXH_L4_B_2_3; } else if (vi->rss_hash_types_saved & VIRTIO_NET_RSS_HASH_TYPE_IPv4) { info->data = RXH_IP_SRC | RXH_IP_DST; } break; case UDP_V6_FLOW: if (vi->rss_hash_types_saved & VIRTIO_NET_RSS_HASH_TYPE_UDPv6) { info->data = RXH_IP_SRC | RXH_IP_DST | RXH_L4_B_0_1 | RXH_L4_B_2_3; } else if (vi->rss_hash_types_saved & VIRTIO_NET_RSS_HASH_TYPE_IPv6) { info->data = RXH_IP_SRC | RXH_IP_DST; } break; case IPV4_FLOW: if (vi->rss_hash_types_saved & VIRTIO_NET_RSS_HASH_TYPE_IPv4) info->data = RXH_IP_SRC | RXH_IP_DST; break; case IPV6_FLOW: if (vi->rss_hash_types_saved & VIRTIO_NET_RSS_HASH_TYPE_IPv6) info->data = RXH_IP_SRC | RXH_IP_DST; break; default: info->data = 0; break; } } static bool virtnet_set_hashflow(struct virtnet_info *vi, struct ethtool_rxnfc *info) { u32 new_hashtypes = vi->rss_hash_types_saved; bool is_disable = info->data & RXH_DISCARD; bool is_l4 = info->data == (RXH_IP_SRC | RXH_IP_DST | RXH_L4_B_0_1 | RXH_L4_B_2_3); /* supports only 'sd', 'sdfn' and 'r' */ if (!((info->data == (RXH_IP_SRC | RXH_IP_DST)) | is_l4 | is_disable)) return false; switch (info->flow_type) { case TCP_V4_FLOW: new_hashtypes &= ~(VIRTIO_NET_RSS_HASH_TYPE_IPv4 | VIRTIO_NET_RSS_HASH_TYPE_TCPv4); if (!is_disable) new_hashtypes |= VIRTIO_NET_RSS_HASH_TYPE_IPv4 | (is_l4 ? VIRTIO_NET_RSS_HASH_TYPE_TCPv4 : 0); break; case UDP_V4_FLOW: new_hashtypes &= ~(VIRTIO_NET_RSS_HASH_TYPE_IPv4 | VIRTIO_NET_RSS_HASH_TYPE_UDPv4); if (!is_disable) new_hashtypes |= VIRTIO_NET_RSS_HASH_TYPE_IPv4 | (is_l4 ? 
VIRTIO_NET_RSS_HASH_TYPE_UDPv4 : 0); break; case IPV4_FLOW: new_hashtypes &= ~VIRTIO_NET_RSS_HASH_TYPE_IPv4; if (!is_disable) new_hashtypes = VIRTIO_NET_RSS_HASH_TYPE_IPv4; break; case TCP_V6_FLOW: new_hashtypes &= ~(VIRTIO_NET_RSS_HASH_TYPE_IPv6 | VIRTIO_NET_RSS_HASH_TYPE_TCPv6); if (!is_disable) new_hashtypes |= VIRTIO_NET_RSS_HASH_TYPE_IPv6 | (is_l4 ? VIRTIO_NET_RSS_HASH_TYPE_TCPv6 : 0); break; case UDP_V6_FLOW: new_hashtypes &= ~(VIRTIO_NET_RSS_HASH_TYPE_IPv6 | VIRTIO_NET_RSS_HASH_TYPE_UDPv6); if (!is_disable) new_hashtypes |= VIRTIO_NET_RSS_HASH_TYPE_IPv6 | (is_l4 ? VIRTIO_NET_RSS_HASH_TYPE_UDPv6 : 0); break; case IPV6_FLOW: new_hashtypes &= ~VIRTIO_NET_RSS_HASH_TYPE_IPv6; if (!is_disable) new_hashtypes = VIRTIO_NET_RSS_HASH_TYPE_IPv6; break; default: /* unsupported flow */ return false; } /* if unsupported hashtype was set */ if (new_hashtypes != (new_hashtypes & vi->rss_hash_types_supported)) return false; if (new_hashtypes != vi->rss_hash_types_saved) { vi->rss_hash_types_saved = new_hashtypes; vi->ctrl->rss.hash_types = vi->rss_hash_types_saved; if (vi->dev->features & NETIF_F_RXHASH) return virtnet_commit_rss_command(vi); } return true; } static void virtnet_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) { struct virtnet_info *vi = netdev_priv(dev); struct virtio_device *vdev = vi->vdev; strscpy(info->driver, KBUILD_MODNAME, sizeof(info->driver)); strscpy(info->version, VIRTNET_DRIVER_VERSION, sizeof(info->version)); strscpy(info->bus_info, virtio_bus_name(vdev), sizeof(info->bus_info)); } /* TODO: Eliminate OOO packets during switching */ static int virtnet_set_channels(struct net_device *dev, struct ethtool_channels *channels) { struct virtnet_info *vi = netdev_priv(dev); u16 queue_pairs = channels->combined_count; int err; /* We don't support separate rx/tx channels. * We don't allow setting 'other' channels. */ if (channels->rx_count || channels->tx_count || channels->other_count) return -EINVAL; if (queue_pairs > vi->max_queue_pairs || queue_pairs == 0) return -EINVAL; /* For now we don't support modifying channels while XDP is loaded * also when XDP is loaded all RX queues have XDP programs so we only * need to check a single RX queue. 
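 * (Illustrative example: "ethtool -L eth0 combined 4" reaches this
 * callback with channels->combined_count == 4; the rx/tx/other counts
 * must remain zero, as checked above.)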
*/ if (vi->rq[0].xdp_prog) return -EINVAL; cpus_read_lock(); err = _virtnet_set_queues(vi, queue_pairs); if (err) { cpus_read_unlock(); goto err; } virtnet_set_affinity(vi); cpus_read_unlock(); netif_set_real_num_tx_queues(dev, queue_pairs); netif_set_real_num_rx_queues(dev, queue_pairs); err: return err; } static void virtnet_get_strings(struct net_device *dev, u32 stringset, u8 *data) { struct virtnet_info *vi = netdev_priv(dev); unsigned int i, j; u8 *p = data; switch (stringset) { case ETH_SS_STATS: for (i = 0; i < vi->curr_queue_pairs; i++) { for (j = 0; j < VIRTNET_RQ_STATS_LEN; j++) ethtool_sprintf(&p, "rx_queue_%u_%s", i, virtnet_rq_stats_desc[j].desc); } for (i = 0; i < vi->curr_queue_pairs; i++) { for (j = 0; j < VIRTNET_SQ_STATS_LEN; j++) ethtool_sprintf(&p, "tx_queue_%u_%s", i, virtnet_sq_stats_desc[j].desc); } break; } } static int virtnet_get_sset_count(struct net_device *dev, int sset) { struct virtnet_info *vi = netdev_priv(dev); switch (sset) { case ETH_SS_STATS: return vi->curr_queue_pairs * (VIRTNET_RQ_STATS_LEN + VIRTNET_SQ_STATS_LEN); default: return -EOPNOTSUPP; } } static void virtnet_get_ethtool_stats(struct net_device *dev, struct ethtool_stats *stats, u64 *data) { struct virtnet_info *vi = netdev_priv(dev); unsigned int idx = 0, start, i, j; const u8 *stats_base; size_t offset; for (i = 0; i < vi->curr_queue_pairs; i++) { struct receive_queue *rq = &vi->rq[i]; stats_base = (u8 *)&rq->stats; do { start = u64_stats_fetch_begin(&rq->stats.syncp); for (j = 0; j < VIRTNET_RQ_STATS_LEN; j++) { offset = virtnet_rq_stats_desc[j].offset; data[idx + j] = *(u64 *)(stats_base + offset); } } while (u64_stats_fetch_retry(&rq->stats.syncp, start)); idx += VIRTNET_RQ_STATS_LEN; } for (i = 0; i < vi->curr_queue_pairs; i++) { struct send_queue *sq = &vi->sq[i]; stats_base = (u8 *)&sq->stats; do { start = u64_stats_fetch_begin(&sq->stats.syncp); for (j = 0; j < VIRTNET_SQ_STATS_LEN; j++) { offset = virtnet_sq_stats_desc[j].offset; data[idx + j] = *(u64 *)(stats_base + offset); } } while (u64_stats_fetch_retry(&sq->stats.syncp, start)); idx += VIRTNET_SQ_STATS_LEN; } } static void virtnet_get_channels(struct net_device *dev, struct ethtool_channels *channels) { struct virtnet_info *vi = netdev_priv(dev); channels->combined_count = vi->curr_queue_pairs; channels->max_combined = vi->max_queue_pairs; channels->max_other = 0; channels->rx_count = 0; channels->tx_count = 0; channels->other_count = 0; } static int virtnet_set_link_ksettings(struct net_device *dev, const struct ethtool_link_ksettings *cmd) { struct virtnet_info *vi = netdev_priv(dev); return ethtool_virtdev_set_link_ksettings(dev, cmd, &vi->speed, &vi->duplex); } static int virtnet_get_link_ksettings(struct net_device *dev, struct ethtool_link_ksettings *cmd) { struct virtnet_info *vi = netdev_priv(dev); cmd->base.speed = vi->speed; cmd->base.duplex = vi->duplex; cmd->base.port = PORT_OTHER; return 0; } static int virtnet_send_notf_coal_cmds(struct virtnet_info *vi, struct ethtool_coalesce *ec) { struct scatterlist sgs_tx, sgs_rx; vi->ctrl->coal_tx.tx_usecs = cpu_to_le32(ec->tx_coalesce_usecs); vi->ctrl->coal_tx.tx_max_packets = cpu_to_le32(ec->tx_max_coalesced_frames); sg_init_one(&sgs_tx, &vi->ctrl->coal_tx, sizeof(vi->ctrl->coal_tx)); if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_NOTF_COAL, VIRTIO_NET_CTRL_NOTF_COAL_TX_SET, &sgs_tx)) return -EINVAL; /* Save parameters */ vi->intr_coal_tx.max_usecs = ec->tx_coalesce_usecs; vi->intr_coal_tx.max_packets = ec->tx_max_coalesced_frames; vi->ctrl->coal_rx.rx_usecs = 
cpu_to_le32(ec->rx_coalesce_usecs); vi->ctrl->coal_rx.rx_max_packets = cpu_to_le32(ec->rx_max_coalesced_frames); sg_init_one(&sgs_rx, &vi->ctrl->coal_rx, sizeof(vi->ctrl->coal_rx)); if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_NOTF_COAL, VIRTIO_NET_CTRL_NOTF_COAL_RX_SET, &sgs_rx)) return -EINVAL; /* Save parameters */ vi->intr_coal_rx.max_usecs = ec->rx_coalesce_usecs; vi->intr_coal_rx.max_packets = ec->rx_max_coalesced_frames; return 0; } static int virtnet_send_ctrl_coal_vq_cmd(struct virtnet_info *vi, u16 vqn, u32 max_usecs, u32 max_packets) { struct scatterlist sgs; vi->ctrl->coal_vq.vqn = cpu_to_le16(vqn); vi->ctrl->coal_vq.coal.max_usecs = cpu_to_le32(max_usecs); vi->ctrl->coal_vq.coal.max_packets = cpu_to_le32(max_packets); sg_init_one(&sgs, &vi->ctrl->coal_vq, sizeof(vi->ctrl->coal_vq)); if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_NOTF_COAL, VIRTIO_NET_CTRL_NOTF_COAL_VQ_SET, &sgs)) return -EINVAL; return 0; } static int virtnet_send_notf_coal_vq_cmds(struct virtnet_info *vi, struct ethtool_coalesce *ec, u16 queue) { int err; if (ec->rx_coalesce_usecs || ec->rx_max_coalesced_frames) { err = virtnet_send_ctrl_coal_vq_cmd(vi, rxq2vq(queue), ec->rx_coalesce_usecs, ec->rx_max_coalesced_frames); if (err) return err; /* Save parameters */ vi->rq[queue].intr_coal.max_usecs = ec->rx_coalesce_usecs; vi->rq[queue].intr_coal.max_packets = ec->rx_max_coalesced_frames; } if (ec->tx_coalesce_usecs || ec->tx_max_coalesced_frames) { err = virtnet_send_ctrl_coal_vq_cmd(vi, txq2vq(queue), ec->tx_coalesce_usecs, ec->tx_max_coalesced_frames); if (err) return err; /* Save parameters */ vi->sq[queue].intr_coal.max_usecs = ec->tx_coalesce_usecs; vi->sq[queue].intr_coal.max_packets = ec->tx_max_coalesced_frames; } return 0; } static int virtnet_coal_params_supported(struct ethtool_coalesce *ec) { /* usecs coalescing is supported only if VIRTIO_NET_F_NOTF_COAL * feature is negotiated. */ if (ec->rx_coalesce_usecs || ec->tx_coalesce_usecs) return -EOPNOTSUPP; if (ec->tx_max_coalesced_frames > 1 || ec->rx_max_coalesced_frames != 1) return -EINVAL; return 0; } static int virtnet_should_update_vq_weight(int dev_flags, int weight, int vq_weight, bool *should_update) { if (weight ^ vq_weight) { if (dev_flags & IFF_UP) return -EBUSY; *should_update = true; } return 0; } static int virtnet_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec, struct kernel_ethtool_coalesce *kernel_coal, struct netlink_ext_ack *extack) { struct virtnet_info *vi = netdev_priv(dev); int ret, queue_number, napi_weight; bool update_napi = false; /* Can't change NAPI weight if the link is up */ napi_weight = ec->tx_max_coalesced_frames ? 
NAPI_POLL_WEIGHT : 0; for (queue_number = 0; queue_number < vi->max_queue_pairs; queue_number++) { ret = virtnet_should_update_vq_weight(dev->flags, napi_weight, vi->sq[queue_number].napi.weight, &update_napi); if (ret) return ret; if (update_napi) { /* All queues that belong to [queue_number, vi->max_queue_pairs] will be * updated for the sake of simplicity, which might not be necessary */ break; } } if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_NOTF_COAL)) ret = virtnet_send_notf_coal_cmds(vi, ec); else ret = virtnet_coal_params_supported(ec); if (ret) return ret; if (update_napi) { for (; queue_number < vi->max_queue_pairs; queue_number++) vi->sq[queue_number].napi.weight = napi_weight; } return ret; } static int virtnet_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec, struct kernel_ethtool_coalesce *kernel_coal, struct netlink_ext_ack *extack) { struct virtnet_info *vi = netdev_priv(dev); if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_NOTF_COAL)) { ec->rx_coalesce_usecs = vi->intr_coal_rx.max_usecs; ec->tx_coalesce_usecs = vi->intr_coal_tx.max_usecs; ec->tx_max_coalesced_frames = vi->intr_coal_tx.max_packets; ec->rx_max_coalesced_frames = vi->intr_coal_rx.max_packets; } else { ec->rx_max_coalesced_frames = 1; if (vi->sq[0].napi.weight) ec->tx_max_coalesced_frames = 1; } return 0; } static int virtnet_set_per_queue_coalesce(struct net_device *dev, u32 queue, struct ethtool_coalesce *ec) { struct virtnet_info *vi = netdev_priv(dev); int ret, napi_weight; bool update_napi = false; if (queue >= vi->max_queue_pairs) return -EINVAL; /* Can't change NAPI weight if the link is up */ napi_weight = ec->tx_max_coalesced_frames ? NAPI_POLL_WEIGHT : 0; ret = virtnet_should_update_vq_weight(dev->flags, napi_weight, vi->sq[queue].napi.weight, &update_napi); if (ret) return ret; if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_VQ_NOTF_COAL)) ret = virtnet_send_notf_coal_vq_cmds(vi, ec, queue); else ret = virtnet_coal_params_supported(ec); if (ret) return ret; if (update_napi) vi->sq[queue].napi.weight = napi_weight; return 0; } static int virtnet_get_per_queue_coalesce(struct net_device *dev, u32 queue, struct ethtool_coalesce *ec) { struct virtnet_info *vi = netdev_priv(dev); if (queue >= vi->max_queue_pairs) return -EINVAL; if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_VQ_NOTF_COAL)) { ec->rx_coalesce_usecs = vi->rq[queue].intr_coal.max_usecs; ec->tx_coalesce_usecs = vi->sq[queue].intr_coal.max_usecs; ec->tx_max_coalesced_frames = vi->sq[queue].intr_coal.max_packets; ec->rx_max_coalesced_frames = vi->rq[queue].intr_coal.max_packets; } else { ec->rx_max_coalesced_frames = 1; if (vi->sq[0].napi.weight) ec->tx_max_coalesced_frames = 1; } return 0; } static void virtnet_init_settings(struct net_device *dev) { struct virtnet_info *vi = netdev_priv(dev); vi->speed = SPEED_UNKNOWN; vi->duplex = DUPLEX_UNKNOWN; } static void virtnet_update_settings(struct virtnet_info *vi) { u32 speed; u8 duplex; if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_SPEED_DUPLEX)) return; virtio_cread_le(vi->vdev, struct virtio_net_config, speed, &speed); if (ethtool_validate_speed(speed)) vi->speed = speed; virtio_cread_le(vi->vdev, struct virtio_net_config, duplex, &duplex); if (ethtool_validate_duplex(duplex)) vi->duplex = duplex; } static u32 virtnet_get_rxfh_key_size(struct net_device *dev) { return ((struct virtnet_info *)netdev_priv(dev))->rss_key_size; } static u32 virtnet_get_rxfh_indir_size(struct net_device *dev) { return ((struct virtnet_info *)netdev_priv(dev))->rss_indir_table_size; } static int 
virtnet_get_rxfh(struct net_device *dev, u32 *indir, u8 *key, u8 *hfunc) { struct virtnet_info *vi = netdev_priv(dev); int i; if (indir) { for (i = 0; i < vi->rss_indir_table_size; ++i) indir[i] = vi->ctrl->rss.indirection_table[i]; } if (key) memcpy(key, vi->ctrl->rss.key, vi->rss_key_size); if (hfunc) *hfunc = ETH_RSS_HASH_TOP; return 0; } static int virtnet_set_rxfh(struct net_device *dev, const u32 *indir, const u8 *key, const u8 hfunc) { struct virtnet_info *vi = netdev_priv(dev); int i; if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP) return -EOPNOTSUPP; if (indir) { for (i = 0; i < vi->rss_indir_table_size; ++i) vi->ctrl->rss.indirection_table[i] = indir[i]; } if (key) memcpy(vi->ctrl->rss.key, key, vi->rss_key_size); virtnet_commit_rss_command(vi); return 0; } static int virtnet_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info, u32 *rule_locs) { struct virtnet_info *vi = netdev_priv(dev); int rc = 0; switch (info->cmd) { case ETHTOOL_GRXRINGS: info->data = vi->curr_queue_pairs; break; case ETHTOOL_GRXFH: virtnet_get_hashflow(vi, info); break; default: rc = -EOPNOTSUPP; } return rc; } static int virtnet_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info) { struct virtnet_info *vi = netdev_priv(dev); int rc = 0; switch (info->cmd) { case ETHTOOL_SRXFH: if (!virtnet_set_hashflow(vi, info)) rc = -EINVAL; break; default: rc = -EOPNOTSUPP; } return rc; } static const struct ethtool_ops virtnet_ethtool_ops = { .supported_coalesce_params = ETHTOOL_COALESCE_MAX_FRAMES | ETHTOOL_COALESCE_USECS, .get_drvinfo = virtnet_get_drvinfo, .get_link = ethtool_op_get_link, .get_ringparam = virtnet_get_ringparam, .set_ringparam = virtnet_set_ringparam, .get_strings = virtnet_get_strings, .get_sset_count = virtnet_get_sset_count, .get_ethtool_stats = virtnet_get_ethtool_stats, .set_channels = virtnet_set_channels, .get_channels = virtnet_get_channels, .get_ts_info = ethtool_op_get_ts_info, .get_link_ksettings = virtnet_get_link_ksettings, .set_link_ksettings = virtnet_set_link_ksettings, .set_coalesce = virtnet_set_coalesce, .get_coalesce = virtnet_get_coalesce, .set_per_queue_coalesce = virtnet_set_per_queue_coalesce, .get_per_queue_coalesce = virtnet_get_per_queue_coalesce, .get_rxfh_key_size = virtnet_get_rxfh_key_size, .get_rxfh_indir_size = virtnet_get_rxfh_indir_size, .get_rxfh = virtnet_get_rxfh, .set_rxfh = virtnet_set_rxfh, .get_rxnfc = virtnet_get_rxnfc, .set_rxnfc = virtnet_set_rxnfc, }; static void virtnet_freeze_down(struct virtio_device *vdev) { struct virtnet_info *vi = vdev->priv; /* Make sure no work handler is accessing the device */ flush_work(&vi->config_work); netif_tx_lock_bh(vi->dev); netif_device_detach(vi->dev); netif_tx_unlock_bh(vi->dev); if (netif_running(vi->dev)) virtnet_close(vi->dev); } static int init_vqs(struct virtnet_info *vi); static int virtnet_restore_up(struct virtio_device *vdev) { struct virtnet_info *vi = vdev->priv; int err; err = init_vqs(vi); if (err) return err; virtio_device_ready(vdev); enable_delayed_refill(vi); if (netif_running(vi->dev)) { err = virtnet_open(vi->dev); if (err) return err; } netif_tx_lock_bh(vi->dev); netif_device_attach(vi->dev); netif_tx_unlock_bh(vi->dev); return err; } static int virtnet_set_guest_offloads(struct virtnet_info *vi, u64 offloads) { struct scatterlist sg; vi->ctrl->offloads = cpu_to_virtio64(vi->vdev, offloads); sg_init_one(&sg, &vi->ctrl->offloads, sizeof(vi->ctrl->offloads)); if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_GUEST_OFFLOADS, VIRTIO_NET_CTRL_GUEST_OFFLOADS_SET, &sg)) { 
dev_warn(&vi->dev->dev, "Fail to set guest offload.\n"); return -EINVAL; } return 0; } static int virtnet_clear_guest_offloads(struct virtnet_info *vi) { u64 offloads = 0; if (!vi->guest_offloads) return 0; return virtnet_set_guest_offloads(vi, offloads); } static int virtnet_restore_guest_offloads(struct virtnet_info *vi) { u64 offloads = vi->guest_offloads; if (!vi->guest_offloads) return 0; return virtnet_set_guest_offloads(vi, offloads); } static int virtnet_xdp_set(struct net_device *dev, struct bpf_prog *prog, struct netlink_ext_ack *extack) { unsigned int room = SKB_DATA_ALIGN(VIRTIO_XDP_HEADROOM + sizeof(struct skb_shared_info)); unsigned int max_sz = PAGE_SIZE - room - ETH_HLEN; struct virtnet_info *vi = netdev_priv(dev); struct bpf_prog *old_prog; u16 xdp_qp = 0, curr_qp; int i, err; if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_GUEST_OFFLOADS) && (virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_TSO4) || virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_TSO6) || virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_ECN) || virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_UFO) || virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_CSUM) || virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_USO4) || virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_USO6))) { NL_SET_ERR_MSG_MOD(extack, "Can't set XDP while host is implementing GRO_HW/CSUM, disable GRO_HW/CSUM first"); return -EOPNOTSUPP; } if (vi->mergeable_rx_bufs && !vi->any_header_sg) { NL_SET_ERR_MSG_MOD(extack, "XDP expects header/data in single page, any_header_sg required"); return -EINVAL; } if (prog && !prog->aux->xdp_has_frags && dev->mtu > max_sz) { NL_SET_ERR_MSG_MOD(extack, "MTU too large to enable XDP without frags"); netdev_warn(dev, "single-buffer XDP requires MTU less than %u\n", max_sz); return -EINVAL; } curr_qp = vi->curr_queue_pairs - vi->xdp_queue_pairs; if (prog) xdp_qp = nr_cpu_ids; /* XDP requires extra queues for XDP_TX */ if (curr_qp + xdp_qp > vi->max_queue_pairs) { netdev_warn_once(dev, "XDP request %i queues but max is %i. XDP_TX and XDP_REDIRECT will operate in a slower locked tx mode.\n", curr_qp + xdp_qp, vi->max_queue_pairs); xdp_qp = 0; } old_prog = rtnl_dereference(vi->rq[0].xdp_prog); if (!prog && !old_prog) return 0; if (prog) bpf_prog_add(prog, vi->max_queue_pairs - 1); /* Make sure NAPI is not using any XDP TX queues for RX. 
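 * Both rx napi and tx napi are disabled below while the program pointers
 * and queue counts are switched, and re-enabled afterwards.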
*/ if (netif_running(dev)) { for (i = 0; i < vi->max_queue_pairs; i++) { napi_disable(&vi->rq[i].napi); virtnet_napi_tx_disable(&vi->sq[i].napi); } } if (!prog) { for (i = 0; i < vi->max_queue_pairs; i++) { rcu_assign_pointer(vi->rq[i].xdp_prog, prog); if (i == 0) virtnet_restore_guest_offloads(vi); } synchronize_net(); } err = _virtnet_set_queues(vi, curr_qp + xdp_qp); if (err) goto err; netif_set_real_num_rx_queues(dev, curr_qp + xdp_qp); vi->xdp_queue_pairs = xdp_qp; if (prog) { vi->xdp_enabled = true; for (i = 0; i < vi->max_queue_pairs; i++) { rcu_assign_pointer(vi->rq[i].xdp_prog, prog); if (i == 0 && !old_prog) virtnet_clear_guest_offloads(vi); } if (!old_prog) xdp_features_set_redirect_target(dev, true); } else { xdp_features_clear_redirect_target(dev); vi->xdp_enabled = false; } for (i = 0; i < vi->max_queue_pairs; i++) { if (old_prog) bpf_prog_put(old_prog); if (netif_running(dev)) { virtnet_napi_enable(vi->rq[i].vq, &vi->rq[i].napi); virtnet_napi_tx_enable(vi, vi->sq[i].vq, &vi->sq[i].napi); } } return 0; err: if (!prog) { virtnet_clear_guest_offloads(vi); for (i = 0; i < vi->max_queue_pairs; i++) rcu_assign_pointer(vi->rq[i].xdp_prog, old_prog); } if (netif_running(dev)) { for (i = 0; i < vi->max_queue_pairs; i++) { virtnet_napi_enable(vi->rq[i].vq, &vi->rq[i].napi); virtnet_napi_tx_enable(vi, vi->sq[i].vq, &vi->sq[i].napi); } } if (prog) bpf_prog_sub(prog, vi->max_queue_pairs - 1); return err; } static int virtnet_xdp(struct net_device *dev, struct netdev_bpf *xdp) { switch (xdp->command) { case XDP_SETUP_PROG: return virtnet_xdp_set(dev, xdp->prog, xdp->extack); default: return -EINVAL; } } static int virtnet_get_phys_port_name(struct net_device *dev, char *buf, size_t len) { struct virtnet_info *vi = netdev_priv(dev); int ret; if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_STANDBY)) return -EOPNOTSUPP; ret = snprintf(buf, len, "sby"); if (ret >= len) return -EOPNOTSUPP; return 0; } static int virtnet_set_features(struct net_device *dev, netdev_features_t features) { struct virtnet_info *vi = netdev_priv(dev); u64 offloads; int err; if ((dev->features ^ features) & NETIF_F_GRO_HW) { if (vi->xdp_enabled) return -EBUSY; if (features & NETIF_F_GRO_HW) offloads = vi->guest_offloads_capable; else offloads = vi->guest_offloads_capable & ~GUEST_OFFLOAD_GRO_HW_MASK; err = virtnet_set_guest_offloads(vi, offloads); if (err) return err; vi->guest_offloads = offloads; } if ((dev->features ^ features) & NETIF_F_RXHASH) { if (features & NETIF_F_RXHASH) vi->ctrl->rss.hash_types = vi->rss_hash_types_saved; else vi->ctrl->rss.hash_types = VIRTIO_NET_HASH_REPORT_NONE; if (!virtnet_commit_rss_command(vi)) return -EINVAL; } return 0; } static void virtnet_tx_timeout(struct net_device *dev, unsigned int txqueue) { struct virtnet_info *priv = netdev_priv(dev); struct send_queue *sq = &priv->sq[txqueue]; struct netdev_queue *txq = netdev_get_tx_queue(dev, txqueue); u64_stats_update_begin(&sq->stats.syncp); sq->stats.tx_timeouts++; u64_stats_update_end(&sq->stats.syncp); netdev_err(dev, "TX timeout on queue: %u, sq: %s, vq: 0x%x, name: %s, %u usecs ago\n", txqueue, sq->name, sq->vq->index, sq->vq->name, jiffies_to_usecs(jiffies - READ_ONCE(txq->trans_start))); } static const struct net_device_ops virtnet_netdev = { .ndo_open = virtnet_open, .ndo_stop = virtnet_close, .ndo_start_xmit = start_xmit, .ndo_validate_addr = eth_validate_addr, .ndo_set_mac_address = virtnet_set_mac_address, .ndo_set_rx_mode = virtnet_set_rx_mode, .ndo_get_stats64 = virtnet_stats, .ndo_vlan_rx_add_vid = 
virtnet_vlan_rx_add_vid, .ndo_vlan_rx_kill_vid = virtnet_vlan_rx_kill_vid, .ndo_bpf = virtnet_xdp, .ndo_xdp_xmit = virtnet_xdp_xmit, .ndo_features_check = passthru_features_check, .ndo_get_phys_port_name = virtnet_get_phys_port_name, .ndo_set_features = virtnet_set_features, .ndo_tx_timeout = virtnet_tx_timeout, }; static void virtnet_config_changed_work(struct work_struct *work) { struct virtnet_info *vi = container_of(work, struct virtnet_info, config_work); u16 v; if (virtio_cread_feature(vi->vdev, VIRTIO_NET_F_STATUS, struct virtio_net_config, status, &v) < 0) return; if (v & VIRTIO_NET_S_ANNOUNCE) { netdev_notify_peers(vi->dev); virtnet_ack_link_announce(vi); } /* Ignore unknown (future) status bits */ v &= VIRTIO_NET_S_LINK_UP; if (vi->status == v) return; vi->status = v; if (vi->status & VIRTIO_NET_S_LINK_UP) { virtnet_update_settings(vi); netif_carrier_on(vi->dev); netif_tx_wake_all_queues(vi->dev); } else { netif_carrier_off(vi->dev); netif_tx_stop_all_queues(vi->dev); } } static void virtnet_config_changed(struct virtio_device *vdev) { struct virtnet_info *vi = vdev->priv; schedule_work(&vi->config_work); } static void virtnet_free_queues(struct virtnet_info *vi) { int i; for (i = 0; i < vi->max_queue_pairs; i++) { __netif_napi_del(&vi->rq[i].napi); __netif_napi_del(&vi->sq[i].napi); } /* We called __netif_napi_del(), * we need to respect an RCU grace period before freeing vi->rq */ synchronize_net(); kfree(vi->rq); kfree(vi->sq); kfree(vi->ctrl); } static void _free_receive_bufs(struct virtnet_info *vi) { struct bpf_prog *old_prog; int i; for (i = 0; i < vi->max_queue_pairs; i++) { while (vi->rq[i].pages) __free_pages(get_a_page(&vi->rq[i], GFP_KERNEL), 0); old_prog = rtnl_dereference(vi->rq[i].xdp_prog); RCU_INIT_POINTER(vi->rq[i].xdp_prog, NULL); if (old_prog) bpf_prog_put(old_prog); } } static void free_receive_bufs(struct virtnet_info *vi) { rtnl_lock(); _free_receive_bufs(vi); rtnl_unlock(); } static void free_receive_page_frags(struct virtnet_info *vi) { int i; for (i = 0; i < vi->max_queue_pairs; i++) if (vi->rq[i].alloc_frag.page) { if (vi->rq[i].do_dma && vi->rq[i].last_dma) virtnet_rq_unmap(&vi->rq[i], vi->rq[i].last_dma, 0); put_page(vi->rq[i].alloc_frag.page); } } static void virtnet_sq_free_unused_buf(struct virtqueue *vq, void *buf) { if (!is_xdp_frame(buf)) dev_kfree_skb(buf); else xdp_return_frame(ptr_to_xdp(buf)); } static void virtnet_rq_free_unused_buf(struct virtqueue *vq, void *buf) { struct virtnet_info *vi = vq->vdev->priv; int i = vq2rxq(vq); if (vi->mergeable_rx_bufs) put_page(virt_to_head_page(buf)); else if (vi->big_packets) give_pages(&vi->rq[i], buf); else put_page(virt_to_head_page(buf)); } static void free_unused_bufs(struct virtnet_info *vi) { void *buf; int i; for (i = 0; i < vi->max_queue_pairs; i++) { struct virtqueue *vq = vi->sq[i].vq; while ((buf = virtqueue_detach_unused_buf(vq)) != NULL) virtnet_sq_free_unused_buf(vq, buf); cond_resched(); } for (i = 0; i < vi->max_queue_pairs; i++) { struct receive_queue *rq = &vi->rq[i]; while ((buf = virtnet_rq_detach_unused_buf(rq)) != NULL) virtnet_rq_free_unused_buf(rq->vq, buf); cond_resched(); } } static void virtnet_del_vqs(struct virtnet_info *vi) { struct virtio_device *vdev = vi->vdev; virtnet_clean_affinity(vi); vdev->config->del_vqs(vdev); virtnet_free_queues(vi); } /* How large should a single buffer be so a queue full of these can fit at * least one full packet? * Logic below assumes the mergeable buffer header is used. 
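 * Worked example (illustrative numbers only): with a 256-entry ring, a
 * 12-byte virtio_net_hdr_mrg_rxbuf and a 65535-byte max MTU,
 * buf_len = 12 + ETH_HLEN + VLAN_HLEN + 65535 = 65565 and
 * DIV_ROUND_UP(65565, 256) = 257, which the max() below then raises to
 * GOOD_PACKET_LEN.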
*/ static unsigned int mergeable_min_buf_len(struct virtnet_info *vi, struct virtqueue *vq) { const unsigned int hdr_len = vi->hdr_len; unsigned int rq_size = virtqueue_get_vring_size(vq); unsigned int packet_len = vi->big_packets ? IP_MAX_MTU : vi->dev->max_mtu; unsigned int buf_len = hdr_len + ETH_HLEN + VLAN_HLEN + packet_len; unsigned int min_buf_len = DIV_ROUND_UP(buf_len, rq_size); return max(max(min_buf_len, hdr_len) - hdr_len, (unsigned int)GOOD_PACKET_LEN); } static int virtnet_find_vqs(struct virtnet_info *vi) { vq_callback_t **callbacks; struct virtqueue **vqs; int ret = -ENOMEM; int i, total_vqs; const char **names; bool *ctx; /* We expect 1 RX virtqueue followed by 1 TX virtqueue, followed by * possible N-1 RX/TX queue pairs used in multiqueue mode, followed by * possible control vq. */ total_vqs = vi->max_queue_pairs * 2 + virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VQ); /* Allocate space for find_vqs parameters */ vqs = kcalloc(total_vqs, sizeof(*vqs), GFP_KERNEL); if (!vqs) goto err_vq; callbacks = kmalloc_array(total_vqs, sizeof(*callbacks), GFP_KERNEL); if (!callbacks) goto err_callback; names = kmalloc_array(total_vqs, sizeof(*names), GFP_KERNEL); if (!names) goto err_names; if (!vi->big_packets || vi->mergeable_rx_bufs) { ctx = kcalloc(total_vqs, sizeof(*ctx), GFP_KERNEL); if (!ctx) goto err_ctx; } else { ctx = NULL; } /* Parameters for control virtqueue, if any */ if (vi->has_cvq) { callbacks[total_vqs - 1] = NULL; names[total_vqs - 1] = "control"; } /* Allocate/initialize parameters for send/receive virtqueues */ for (i = 0; i < vi->max_queue_pairs; i++) { callbacks[rxq2vq(i)] = skb_recv_done; callbacks[txq2vq(i)] = skb_xmit_done; sprintf(vi->rq[i].name, "input.%d", i); sprintf(vi->sq[i].name, "output.%d", i); names[rxq2vq(i)] = vi->rq[i].name; names[txq2vq(i)] = vi->sq[i].name; if (ctx) ctx[rxq2vq(i)] = true; } ret = virtio_find_vqs_ctx(vi->vdev, total_vqs, vqs, callbacks, names, ctx, NULL); if (ret) goto err_find; if (vi->has_cvq) { vi->cvq = vqs[total_vqs - 1]; if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VLAN)) vi->dev->features |= NETIF_F_HW_VLAN_CTAG_FILTER; } for (i = 0; i < vi->max_queue_pairs; i++) { vi->rq[i].vq = vqs[rxq2vq(i)]; vi->rq[i].min_buf_len = mergeable_min_buf_len(vi, vi->rq[i].vq); vi->sq[i].vq = vqs[txq2vq(i)]; } /* run here: ret == 0. */ err_find: kfree(ctx); err_ctx: kfree(names); err_names: kfree(callbacks); err_callback: kfree(vqs); err_vq: return ret; } static int virtnet_alloc_queues(struct virtnet_info *vi) { int i; if (vi->has_cvq) { vi->ctrl = kzalloc(sizeof(*vi->ctrl), GFP_KERNEL); if (!vi->ctrl) goto err_ctrl; } else { vi->ctrl = NULL; } vi->sq = kcalloc(vi->max_queue_pairs, sizeof(*vi->sq), GFP_KERNEL); if (!vi->sq) goto err_sq; vi->rq = kcalloc(vi->max_queue_pairs, sizeof(*vi->rq), GFP_KERNEL); if (!vi->rq) goto err_rq; INIT_DELAYED_WORK(&vi->refill, refill_work); for (i = 0; i < vi->max_queue_pairs; i++) { vi->rq[i].pages = NULL; netif_napi_add_weight(vi->dev, &vi->rq[i].napi, virtnet_poll, napi_weight); netif_napi_add_tx_weight(vi->dev, &vi->sq[i].napi, virtnet_poll_tx, napi_tx ? 
napi_weight : 0); sg_init_table(vi->rq[i].sg, ARRAY_SIZE(vi->rq[i].sg)); ewma_pkt_len_init(&vi->rq[i].mrg_avg_pkt_len); sg_init_table(vi->sq[i].sg, ARRAY_SIZE(vi->sq[i].sg)); u64_stats_init(&vi->rq[i].stats.syncp); u64_stats_init(&vi->sq[i].stats.syncp); } return 0; err_rq: kfree(vi->sq); err_sq: kfree(vi->ctrl); err_ctrl: return -ENOMEM; } static int init_vqs(struct virtnet_info *vi) { int ret; /* Allocate send & receive queues */ ret = virtnet_alloc_queues(vi); if (ret) goto err; ret = virtnet_find_vqs(vi); if (ret) goto err_free; virtnet_rq_set_premapped(vi); cpus_read_lock(); virtnet_set_affinity(vi); cpus_read_unlock(); return 0; err_free: virtnet_free_queues(vi); err: return ret; } #ifdef CONFIG_SYSFS static ssize_t mergeable_rx_buffer_size_show(struct netdev_rx_queue *queue, char *buf) { struct virtnet_info *vi = netdev_priv(queue->dev); unsigned int queue_index = get_netdev_rx_queue_index(queue); unsigned int headroom = virtnet_get_headroom(vi); unsigned int tailroom = headroom ? sizeof(struct skb_shared_info) : 0; struct ewma_pkt_len *avg; BUG_ON(queue_index >= vi->max_queue_pairs); avg = &vi->rq[queue_index].mrg_avg_pkt_len; return sprintf(buf, "%u\n", get_mergeable_buf_len(&vi->rq[queue_index], avg, SKB_DATA_ALIGN(headroom + tailroom))); } static struct rx_queue_attribute mergeable_rx_buffer_size_attribute = __ATTR_RO(mergeable_rx_buffer_size); static struct attribute *virtio_net_mrg_rx_attrs[] = { &mergeable_rx_buffer_size_attribute.attr, NULL }; static const struct attribute_group virtio_net_mrg_rx_group = { .name = "virtio_net", .attrs = virtio_net_mrg_rx_attrs }; #endif static bool virtnet_fail_on_feature(struct virtio_device *vdev, unsigned int fbit, const char *fname, const char *dname) { if (!virtio_has_feature(vdev, fbit)) return false; dev_err(&vdev->dev, "device advertises feature %s but not %s", fname, dname); return true; } #define VIRTNET_FAIL_ON(vdev, fbit, dbit) \ virtnet_fail_on_feature(vdev, fbit, #fbit, dbit) static bool virtnet_validate_features(struct virtio_device *vdev) { if (!virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_VQ) && (VIRTNET_FAIL_ON(vdev, VIRTIO_NET_F_CTRL_RX, "VIRTIO_NET_F_CTRL_VQ") || VIRTNET_FAIL_ON(vdev, VIRTIO_NET_F_CTRL_VLAN, "VIRTIO_NET_F_CTRL_VQ") || VIRTNET_FAIL_ON(vdev, VIRTIO_NET_F_GUEST_ANNOUNCE, "VIRTIO_NET_F_CTRL_VQ") || VIRTNET_FAIL_ON(vdev, VIRTIO_NET_F_MQ, "VIRTIO_NET_F_CTRL_VQ") || VIRTNET_FAIL_ON(vdev, VIRTIO_NET_F_CTRL_MAC_ADDR, "VIRTIO_NET_F_CTRL_VQ") || VIRTNET_FAIL_ON(vdev, VIRTIO_NET_F_RSS, "VIRTIO_NET_F_CTRL_VQ") || VIRTNET_FAIL_ON(vdev, VIRTIO_NET_F_HASH_REPORT, "VIRTIO_NET_F_CTRL_VQ") || VIRTNET_FAIL_ON(vdev, VIRTIO_NET_F_NOTF_COAL, "VIRTIO_NET_F_CTRL_VQ") || VIRTNET_FAIL_ON(vdev, VIRTIO_NET_F_VQ_NOTF_COAL, "VIRTIO_NET_F_CTRL_VQ"))) { return false; } return true; } #define MIN_MTU ETH_MIN_MTU #define MAX_MTU ETH_MAX_MTU static int virtnet_validate(struct virtio_device *vdev) { if (!vdev->config->get) { dev_err(&vdev->dev, "%s failure: config access disabled\n", __func__); return -EINVAL; } if (!virtnet_validate_features(vdev)) return -EINVAL; if (virtio_has_feature(vdev, VIRTIO_NET_F_MTU)) { int mtu = virtio_cread16(vdev, offsetof(struct virtio_net_config, mtu)); if (mtu < MIN_MTU) __virtio_clear_bit(vdev, VIRTIO_NET_F_MTU); } if (virtio_has_feature(vdev, VIRTIO_NET_F_STANDBY) && !virtio_has_feature(vdev, VIRTIO_NET_F_MAC)) { dev_warn(&vdev->dev, "device advertises feature VIRTIO_NET_F_STANDBY but not VIRTIO_NET_F_MAC, disabling standby"); __virtio_clear_bit(vdev, VIRTIO_NET_F_STANDBY); } return 0; } static bool 
virtnet_check_guest_gso(const struct virtnet_info *vi) { return virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_TSO4) || virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_TSO6) || virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_ECN) || virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_UFO) || (virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_USO4) && virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_USO6)); } static void virtnet_set_big_packets(struct virtnet_info *vi, const int mtu) { bool guest_gso = virtnet_check_guest_gso(vi); /* If device can receive ANY guest GSO packets, regardless of mtu, * allocate packets of maximum size, otherwise limit it to only * mtu size worth only. */ if (mtu > ETH_DATA_LEN || guest_gso) { vi->big_packets = true; vi->big_packets_num_skbfrags = guest_gso ? MAX_SKB_FRAGS : DIV_ROUND_UP(mtu, PAGE_SIZE); } } static int virtnet_probe(struct virtio_device *vdev) { int i, err = -ENOMEM; struct net_device *dev; struct virtnet_info *vi; u16 max_queue_pairs; int mtu = 0; /* Find if host supports multiqueue/rss virtio_net device */ max_queue_pairs = 1; if (virtio_has_feature(vdev, VIRTIO_NET_F_MQ) || virtio_has_feature(vdev, VIRTIO_NET_F_RSS)) max_queue_pairs = virtio_cread16(vdev, offsetof(struct virtio_net_config, max_virtqueue_pairs)); /* We need at least 2 queue's */ if (max_queue_pairs < VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MIN || max_queue_pairs > VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MAX || !virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_VQ)) max_queue_pairs = 1; /* Allocate ourselves a network device with room for our info */ dev = alloc_etherdev_mq(sizeof(struct virtnet_info), max_queue_pairs); if (!dev) return -ENOMEM; /* Set up network device as normal. */ dev->priv_flags |= IFF_UNICAST_FLT | IFF_LIVE_ADDR_CHANGE | IFF_TX_SKB_NO_LINEAR; dev->netdev_ops = &virtnet_netdev; dev->features = NETIF_F_HIGHDMA; dev->ethtool_ops = &virtnet_ethtool_ops; SET_NETDEV_DEV(dev, &vdev->dev); /* Do we support "hardware" checksums? */ if (virtio_has_feature(vdev, VIRTIO_NET_F_CSUM)) { /* This opens up the world of extra features. */ dev->hw_features |= NETIF_F_HW_CSUM | NETIF_F_SG; if (csum) dev->features |= NETIF_F_HW_CSUM | NETIF_F_SG; if (virtio_has_feature(vdev, VIRTIO_NET_F_GSO)) { dev->hw_features |= NETIF_F_TSO | NETIF_F_TSO_ECN | NETIF_F_TSO6; } /* Individual feature bits: what can host handle? */ if (virtio_has_feature(vdev, VIRTIO_NET_F_HOST_TSO4)) dev->hw_features |= NETIF_F_TSO; if (virtio_has_feature(vdev, VIRTIO_NET_F_HOST_TSO6)) dev->hw_features |= NETIF_F_TSO6; if (virtio_has_feature(vdev, VIRTIO_NET_F_HOST_ECN)) dev->hw_features |= NETIF_F_TSO_ECN; if (virtio_has_feature(vdev, VIRTIO_NET_F_HOST_USO)) dev->hw_features |= NETIF_F_GSO_UDP_L4; dev->features |= NETIF_F_GSO_ROBUST; if (gso) dev->features |= dev->hw_features & NETIF_F_ALL_TSO; /* (!csum && gso) case will be fixed by register_netdev() */ } if (virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_CSUM)) dev->features |= NETIF_F_RXCSUM; if (virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO4) || virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO6)) dev->features |= NETIF_F_GRO_HW; if (virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_GUEST_OFFLOADS)) dev->hw_features |= NETIF_F_GRO_HW; dev->vlan_features = dev->features; dev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT; /* MTU range: 68 - 65535 */ dev->min_mtu = MIN_MTU; dev->max_mtu = MAX_MTU; /* Configuration may specify what MAC to use. Otherwise random. 
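 * If VIRTIO_NET_F_MAC was negotiated the address is read from config
 * space below; otherwise a random one is generated and, when
 * VIRTIO_NET_F_CTRL_MAC_ADDR is available, pushed back to the device
 * later in probe.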
*/ if (virtio_has_feature(vdev, VIRTIO_NET_F_MAC)) { u8 addr[ETH_ALEN]; virtio_cread_bytes(vdev, offsetof(struct virtio_net_config, mac), addr, ETH_ALEN); eth_hw_addr_set(dev, addr); } else { eth_hw_addr_random(dev); dev_info(&vdev->dev, "Assigned random MAC address %pM\n", dev->dev_addr); } /* Set up our device-specific information */ vi = netdev_priv(dev); vi->dev = dev; vi->vdev = vdev; vdev->priv = vi; INIT_WORK(&vi->config_work, virtnet_config_changed_work); spin_lock_init(&vi->refill_lock); if (virtio_has_feature(vdev, VIRTIO_NET_F_MRG_RXBUF)) { vi->mergeable_rx_bufs = true; dev->xdp_features |= NETDEV_XDP_ACT_RX_SG; } if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_NOTF_COAL)) { vi->intr_coal_rx.max_usecs = 0; vi->intr_coal_tx.max_usecs = 0; vi->intr_coal_tx.max_packets = 0; vi->intr_coal_rx.max_packets = 0; } if (virtio_has_feature(vdev, VIRTIO_NET_F_HASH_REPORT)) vi->has_rss_hash_report = true; if (virtio_has_feature(vdev, VIRTIO_NET_F_RSS)) vi->has_rss = true; if (vi->has_rss || vi->has_rss_hash_report) { vi->rss_indir_table_size = virtio_cread16(vdev, offsetof(struct virtio_net_config, rss_max_indirection_table_length)); vi->rss_key_size = virtio_cread8(vdev, offsetof(struct virtio_net_config, rss_max_key_size)); vi->rss_hash_types_supported = virtio_cread32(vdev, offsetof(struct virtio_net_config, supported_hash_types)); vi->rss_hash_types_supported &= ~(VIRTIO_NET_RSS_HASH_TYPE_IP_EX | VIRTIO_NET_RSS_HASH_TYPE_TCP_EX | VIRTIO_NET_RSS_HASH_TYPE_UDP_EX); dev->hw_features |= NETIF_F_RXHASH; } if (vi->has_rss_hash_report) vi->hdr_len = sizeof(struct virtio_net_hdr_v1_hash); else if (virtio_has_feature(vdev, VIRTIO_NET_F_MRG_RXBUF) || virtio_has_feature(vdev, VIRTIO_F_VERSION_1)) vi->hdr_len = sizeof(struct virtio_net_hdr_mrg_rxbuf); else vi->hdr_len = sizeof(struct virtio_net_hdr); if (virtio_has_feature(vdev, VIRTIO_F_ANY_LAYOUT) || virtio_has_feature(vdev, VIRTIO_F_VERSION_1)) vi->any_header_sg = true; if (virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_VQ)) vi->has_cvq = true; if (virtio_has_feature(vdev, VIRTIO_NET_F_MTU)) { mtu = virtio_cread16(vdev, offsetof(struct virtio_net_config, mtu)); if (mtu < dev->min_mtu) { /* Should never trigger: MTU was previously validated * in virtnet_validate. 
*/ dev_err(&vdev->dev, "device MTU appears to have changed it is now %d < %d", mtu, dev->min_mtu); err = -EINVAL; goto free; } dev->mtu = mtu; dev->max_mtu = mtu; } virtnet_set_big_packets(vi, mtu); if (vi->any_header_sg) dev->needed_headroom = vi->hdr_len; /* Enable multiqueue by default */ if (num_online_cpus() >= max_queue_pairs) vi->curr_queue_pairs = max_queue_pairs; else vi->curr_queue_pairs = num_online_cpus(); vi->max_queue_pairs = max_queue_pairs; /* Allocate/initialize the rx/tx queues, and invoke find_vqs */ err = init_vqs(vi); if (err) goto free; #ifdef CONFIG_SYSFS if (vi->mergeable_rx_bufs) dev->sysfs_rx_queue_group = &virtio_net_mrg_rx_group; #endif netif_set_real_num_tx_queues(dev, vi->curr_queue_pairs); netif_set_real_num_rx_queues(dev, vi->curr_queue_pairs); virtnet_init_settings(dev); if (virtio_has_feature(vdev, VIRTIO_NET_F_STANDBY)) { vi->failover = net_failover_create(vi->dev); if (IS_ERR(vi->failover)) { err = PTR_ERR(vi->failover); goto free_vqs; } } if (vi->has_rss || vi->has_rss_hash_report) virtnet_init_default_rss(vi); /* serialize netdev register + virtio_device_ready() with ndo_open() */ rtnl_lock(); err = register_netdevice(dev); if (err) { pr_debug("virtio_net: registering device failed\n"); rtnl_unlock(); goto free_failover; } virtio_device_ready(vdev); _virtnet_set_queues(vi, vi->curr_queue_pairs); /* a random MAC address has been assigned, notify the device. * We don't fail probe if VIRTIO_NET_F_CTRL_MAC_ADDR is not there * because many devices work fine without getting MAC explicitly */ if (!virtio_has_feature(vdev, VIRTIO_NET_F_MAC) && virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_MAC_ADDR)) { struct scatterlist sg; sg_init_one(&sg, dev->dev_addr, dev->addr_len); if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MAC, VIRTIO_NET_CTRL_MAC_ADDR_SET, &sg)) { pr_debug("virtio_net: setting MAC address failed\n"); rtnl_unlock(); err = -EINVAL; goto free_unregister_netdev; } } rtnl_unlock(); err = virtnet_cpu_notif_add(vi); if (err) { pr_debug("virtio_net: registering cpu notifier failed\n"); goto free_unregister_netdev; } /* Assume link up if device can't report link status, otherwise get link status from config. */ netif_carrier_off(dev); if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_STATUS)) { schedule_work(&vi->config_work); } else { vi->status = VIRTIO_NET_S_LINK_UP; virtnet_update_settings(vi); netif_carrier_on(dev); } for (i = 0; i < ARRAY_SIZE(guest_offloads); i++) if (virtio_has_feature(vi->vdev, guest_offloads[i])) set_bit(guest_offloads[i], &vi->guest_offloads); vi->guest_offloads_capable = vi->guest_offloads; pr_debug("virtnet: registered device %s with %d RX and TX vq's\n", dev->name, max_queue_pairs); return 0; free_unregister_netdev: unregister_netdev(dev); free_failover: net_failover_destroy(vi->failover); free_vqs: virtio_reset_device(vdev); cancel_delayed_work_sync(&vi->refill); free_receive_page_frags(vi); virtnet_del_vqs(vi); free: free_netdev(dev); return err; } static void remove_vq_common(struct virtnet_info *vi) { virtio_reset_device(vi->vdev); /* Free unused buffers in both send and recv, if any. */ free_unused_bufs(vi); free_receive_bufs(vi); free_receive_page_frags(vi); virtnet_del_vqs(vi); } static void virtnet_remove(struct virtio_device *vdev) { struct virtnet_info *vi = vdev->priv; virtnet_cpu_notif_remove(vi); /* Make sure no work handler is accessing the device. 
*/ flush_work(&vi->config_work); unregister_netdev(vi->dev); net_failover_destroy(vi->failover); remove_vq_common(vi); free_netdev(vi->dev); } static __maybe_unused int virtnet_freeze(struct virtio_device *vdev) { struct virtnet_info *vi = vdev->priv; virtnet_cpu_notif_remove(vi); virtnet_freeze_down(vdev); remove_vq_common(vi); return 0; } static __maybe_unused int virtnet_restore(struct virtio_device *vdev) { struct virtnet_info *vi = vdev->priv; int err; err = virtnet_restore_up(vdev); if (err) return err; virtnet_set_queues(vi, vi->curr_queue_pairs); err = virtnet_cpu_notif_add(vi); if (err) { virtnet_freeze_down(vdev); remove_vq_common(vi); return err; } return 0; } static struct virtio_device_id id_table[] = { { VIRTIO_ID_NET, VIRTIO_DEV_ANY_ID }, { 0 }, }; #define VIRTNET_FEATURES \ VIRTIO_NET_F_CSUM, VIRTIO_NET_F_GUEST_CSUM, \ VIRTIO_NET_F_MAC, \ VIRTIO_NET_F_HOST_TSO4, VIRTIO_NET_F_HOST_UFO, VIRTIO_NET_F_HOST_TSO6, \ VIRTIO_NET_F_HOST_ECN, VIRTIO_NET_F_GUEST_TSO4, VIRTIO_NET_F_GUEST_TSO6, \ VIRTIO_NET_F_GUEST_ECN, VIRTIO_NET_F_GUEST_UFO, \ VIRTIO_NET_F_HOST_USO, VIRTIO_NET_F_GUEST_USO4, VIRTIO_NET_F_GUEST_USO6, \ VIRTIO_NET_F_MRG_RXBUF, VIRTIO_NET_F_STATUS, VIRTIO_NET_F_CTRL_VQ, \ VIRTIO_NET_F_CTRL_RX, VIRTIO_NET_F_CTRL_VLAN, \ VIRTIO_NET_F_GUEST_ANNOUNCE, VIRTIO_NET_F_MQ, \ VIRTIO_NET_F_CTRL_MAC_ADDR, \ VIRTIO_NET_F_MTU, VIRTIO_NET_F_CTRL_GUEST_OFFLOADS, \ VIRTIO_NET_F_SPEED_DUPLEX, VIRTIO_NET_F_STANDBY, \ VIRTIO_NET_F_RSS, VIRTIO_NET_F_HASH_REPORT, VIRTIO_NET_F_NOTF_COAL, \ VIRTIO_NET_F_VQ_NOTF_COAL, \ VIRTIO_NET_F_GUEST_HDRLEN static unsigned int features[] = { VIRTNET_FEATURES, }; static unsigned int features_legacy[] = { VIRTNET_FEATURES, VIRTIO_NET_F_GSO, VIRTIO_F_ANY_LAYOUT, }; static struct virtio_driver virtio_net_driver = { .feature_table = features, .feature_table_size = ARRAY_SIZE(features), .feature_table_legacy = features_legacy, .feature_table_size_legacy = ARRAY_SIZE(features_legacy), .driver.name = KBUILD_MODNAME, .driver.owner = THIS_MODULE, .id_table = id_table, .validate = virtnet_validate, .probe = virtnet_probe, .remove = virtnet_remove, .config_changed = virtnet_config_changed, #ifdef CONFIG_PM_SLEEP .freeze = virtnet_freeze, .restore = virtnet_restore, #endif }; static __init int virtio_net_driver_init(void) { int ret; ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN, "virtio/net:online", virtnet_cpu_online, virtnet_cpu_down_prep); if (ret < 0) goto out; virtionet_online = ret; ret = cpuhp_setup_state_multi(CPUHP_VIRT_NET_DEAD, "virtio/net:dead", NULL, virtnet_cpu_dead); if (ret) goto err_dead; ret = register_virtio_driver(&virtio_net_driver); if (ret) goto err_virtio; return 0; err_virtio: cpuhp_remove_multi_state(CPUHP_VIRT_NET_DEAD); err_dead: cpuhp_remove_multi_state(virtionet_online); out: return ret; } module_init(virtio_net_driver_init); static __exit void virtio_net_driver_exit(void) { unregister_virtio_driver(&virtio_net_driver); cpuhp_remove_multi_state(CPUHP_VIRT_NET_DEAD); cpuhp_remove_multi_state(virtionet_online); } module_exit(virtio_net_driver_exit); MODULE_DEVICE_TABLE(virtio, id_table); MODULE_DESCRIPTION("Virtio network driver"); MODULE_LICENSE("GPL");
linux-master
drivers/net/virtio_net.c
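virtnet_validate_features() in the driver above only accepts control-plane features such as VIRTIO_NET_F_CTRL_RX, VIRTIO_NET_F_MQ or VIRTIO_NET_F_RSS when the device also offers VIRTIO_NET_F_CTRL_VQ, and the VIRTNET_FAIL_ON wrapper stringifies the offending bit for the error message. Below is a minimal standalone sketch of that dependency-check pattern in plain userspace C; the F_* bit values, helper names and messages are invented for the example and are not part of the virtio ABI.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Invented feature bits, standing in for the VIRTIO_NET_F_* constants. */
#define F_CTRL_VQ  (1u << 0)
#define F_CTRL_RX  (1u << 1)
#define F_MQ       (1u << 2)

static bool has_feature(uint32_t features, uint32_t fbit)
{
	return (features & fbit) != 0;
}

static bool fail_on_feature(uint32_t features, uint32_t fbit,
			    const char *fname, const char *dname)
{
	if (!has_feature(features, fbit))
		return false;

	fprintf(stderr, "device advertises feature %s but not %s\n",
		fname, dname);
	return true;
}

/* #fbit stringifies the argument, mirroring what VIRTNET_FAIL_ON does. */
#define FAIL_ON(features, fbit, dname) \
	fail_on_feature(features, fbit, #fbit, dname)

static bool validate_features(uint32_t features)
{
	/* Features that need the control virtqueue are rejected without it. */
	if (!has_feature(features, F_CTRL_VQ) &&
	    (FAIL_ON(features, F_CTRL_RX, "F_CTRL_VQ") ||
	     FAIL_ON(features, F_MQ, "F_CTRL_VQ")))
		return false;

	return true;
}

int main(void)
{
	/* MQ without a control virtqueue is rejected... */
	printf("MQ alone:     %s\n", validate_features(F_MQ) ? "ok" : "rejected");
	/* ...but the same offer with CTRL_VQ present is accepted. */
	printf("MQ + CTRL_VQ: %s\n",
	       validate_features(F_MQ | F_CTRL_VQ) ? "ok" : "rejected");
	return 0;
}

The #fbit stringification is what lets a single macro both test the bit and name it readably in the log message.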
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system. INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Pseudo-driver for the loopback interface.
 *
 * Version:	@(#)loopback.c	1.0.4b	08/16/93
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <[email protected]>
 *		Donald Becker, <[email protected]>
 *
 *		Alan Cox	:	Fixed oddments for NET3.014
 *		Alan Cox	:	Rejig for NET3.029 snap #3
 *		Alan Cox	:	Fixed NET3.029 bugs and sped up
 *		Larry McVoy	:	Tiny tweak to double performance
 *		Alan Cox	:	Backed out LMV's tweak - the linux mm
 *					can't take it...
 *		Michael Griffith:	Don't bother computing the checksums
 *					on packets received on the loopback
 *					interface.
 *		Alexey Kuznetsov:	Potential hang under some extreme
 *					cases removed.
 */
#include <linux/kernel.h>
#include <linux/jiffies.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/fs.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/socket.h>
#include <linux/errno.h>
#include <linux/fcntl.h>
#include <linux/in.h>

#include <linux/uaccess.h>
#include <linux/io.h>

#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/ethtool.h>
#include <net/sch_generic.h>
#include <net/sock.h>
#include <net/checksum.h>
#include <linux/if_ether.h>	/* For the statistics structure. */
#include <linux/if_arp.h>	/* For ARPHRD_ETHER */
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/percpu.h>
#include <linux/net_tstamp.h>
#include <net/net_namespace.h>
#include <linux/u64_stats_sync.h>

/* blackhole_netdev - a device used for dsts that are marked expired!
 * This is global device (instead of per-net-ns) since it's not needed
 * to be per-ns and gets initialized at boot time.
 */
struct net_device *blackhole_netdev;
EXPORT_SYMBOL(blackhole_netdev);

/* The higher levels take care of making this non-reentrant (it's
 * called with bh's disabled).
 */
static netdev_tx_t loopback_xmit(struct sk_buff *skb,
				 struct net_device *dev)
{
	int len;

	skb_tx_timestamp(skb);

	/* do not fool net_timestamp_check() with various clock bases */
	skb_clear_tstamp(skb);

	skb_orphan(skb);

	/* Before queueing this packet to __netif_rx(),
	 * make sure dst is refcounted.
	 */
	skb_dst_force(skb);

	skb->protocol = eth_type_trans(skb, dev);

	len = skb->len;
	if (likely(__netif_rx(skb) == NET_RX_SUCCESS))
		dev_lstats_add(dev, len);

	return NETDEV_TX_OK;
}

void dev_lstats_read(struct net_device *dev, u64 *packets, u64 *bytes)
{
	int i;

	*packets = 0;
	*bytes = 0;

	for_each_possible_cpu(i) {
		const struct pcpu_lstats *lb_stats;
		u64 tbytes, tpackets;
		unsigned int start;

		lb_stats = per_cpu_ptr(dev->lstats, i);
		do {
			start = u64_stats_fetch_begin(&lb_stats->syncp);
			tpackets = u64_stats_read(&lb_stats->packets);
			tbytes = u64_stats_read(&lb_stats->bytes);
		} while (u64_stats_fetch_retry(&lb_stats->syncp, start));
		*bytes += tbytes;
		*packets += tpackets;
	}
}
EXPORT_SYMBOL(dev_lstats_read);

static void loopback_get_stats64(struct net_device *dev,
				 struct rtnl_link_stats64 *stats)
{
	u64 packets, bytes;

	dev_lstats_read(dev, &packets, &bytes);

	stats->rx_packets = packets;
	stats->tx_packets = packets;
	stats->rx_bytes = bytes;
	stats->tx_bytes = bytes;
}

static u32 always_on(struct net_device *dev)
{
	return 1;
}

static const struct ethtool_ops loopback_ethtool_ops = {
	.get_link		= always_on,
	.get_ts_info		= ethtool_op_get_ts_info,
};

static int loopback_dev_init(struct net_device *dev)
{
	dev->lstats = netdev_alloc_pcpu_stats(struct pcpu_lstats);
	if (!dev->lstats)
		return -ENOMEM;
	return 0;
}

static void loopback_dev_free(struct net_device *dev)
{
	dev_net(dev)->loopback_dev = NULL;
	free_percpu(dev->lstats);
}

static const struct net_device_ops loopback_ops = {
	.ndo_init		= loopback_dev_init,
	.ndo_start_xmit		= loopback_xmit,
	.ndo_get_stats64	= loopback_get_stats64,
	.ndo_set_mac_address	= eth_mac_addr,
};

static void gen_lo_setup(struct net_device *dev,
			 unsigned int mtu,
			 const struct ethtool_ops *eth_ops,
			 const struct header_ops *hdr_ops,
			 const struct net_device_ops *dev_ops,
			 void (*dev_destructor)(struct net_device *dev))
{
	dev->mtu		= mtu;
	dev->hard_header_len	= ETH_HLEN;	/* 14 */
	dev->min_header_len	= ETH_HLEN;	/* 14 */
	dev->addr_len		= ETH_ALEN;	/* 6 */
	dev->type		= ARPHRD_LOOPBACK;	/* 0x0001*/
	dev->flags		= IFF_LOOPBACK;
	dev->priv_flags		|= IFF_LIVE_ADDR_CHANGE | IFF_NO_QUEUE;
	netif_keep_dst(dev);
	dev->hw_features	= NETIF_F_GSO_SOFTWARE;
	dev->features		= NETIF_F_SG | NETIF_F_FRAGLIST
		| NETIF_F_GSO_SOFTWARE
		| NETIF_F_HW_CSUM
		| NETIF_F_RXCSUM
		| NETIF_F_SCTP_CRC
		| NETIF_F_HIGHDMA
		| NETIF_F_LLTX
		| NETIF_F_NETNS_LOCAL
		| NETIF_F_VLAN_CHALLENGED
		| NETIF_F_LOOPBACK;
	dev->ethtool_ops	= eth_ops;
	dev->header_ops		= hdr_ops;
	dev->netdev_ops		= dev_ops;
	dev->needs_free_netdev	= true;
	dev->priv_destructor	= dev_destructor;

	netif_set_tso_max_size(dev, GSO_MAX_SIZE);
}

/* The loopback device is special. There is only one instance
 * per network namespace.
 */
static void loopback_setup(struct net_device *dev)
{
	gen_lo_setup(dev, (64 * 1024), &loopback_ethtool_ops,
		     &eth_header_ops, &loopback_ops, loopback_dev_free);
}

/* Setup and register the loopback device. */
static __net_init int loopback_net_init(struct net *net)
{
	struct net_device *dev;
	int err;

	err = -ENOMEM;
	dev = alloc_netdev(0, "lo", NET_NAME_PREDICTABLE, loopback_setup);
	if (!dev)
		goto out;

	dev_net_set(dev, net);
	err = register_netdev(dev);
	if (err)
		goto out_free_netdev;

	BUG_ON(dev->ifindex != LOOPBACK_IFINDEX);
	net->loopback_dev = dev;
	return 0;

out_free_netdev:
	free_netdev(dev);
out:
	if (net_eq(net, &init_net))
		panic("loopback: Failed to register netdevice: %d\n", err);
	return err;
}

/* Registered in net/core/dev.c */
struct pernet_operations __net_initdata loopback_net_ops = {
	.init = loopback_net_init,
};

/* blackhole netdevice */
static netdev_tx_t blackhole_netdev_xmit(struct sk_buff *skb,
					 struct net_device *dev)
{
	kfree_skb(skb);
	net_warn_ratelimited("%s(): Dropping skb.\n", __func__);
	return NETDEV_TX_OK;
}

static const struct net_device_ops blackhole_netdev_ops = {
	.ndo_start_xmit = blackhole_netdev_xmit,
};

/* This is a dst-dummy device used specifically for invalidated
 * DSTs and unlike loopback, this is not per-ns.
 */
static void blackhole_netdev_setup(struct net_device *dev)
{
	gen_lo_setup(dev, ETH_MIN_MTU, NULL, NULL, &blackhole_netdev_ops, NULL);
}

/* Setup and register the blackhole_netdev. */
static int __init blackhole_netdev_init(void)
{
	blackhole_netdev = alloc_netdev(0, "blackhole_dev", NET_NAME_UNKNOWN,
					blackhole_netdev_setup);
	if (!blackhole_netdev)
		return -ENOMEM;

	rtnl_lock();
	dev_init_scheduler(blackhole_netdev);
	dev_activate(blackhole_netdev);
	rtnl_unlock();

	blackhole_netdev->flags |= IFF_UP | IFF_RUNNING;
	dev_net_set(blackhole_netdev, &init_net);

	return 0;
}

device_initcall(blackhole_netdev_init);
linux-master
drivers/net/loopback.c
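loopback_xmit() above charges each reflected frame to per-CPU counters through dev_lstats_add(), dev_lstats_read() folds every CPU's pair into one total, and loopback_get_stats64() reports that total as both rx and tx. The sketch below shows the same accounting shape as standalone userspace C; it deliberately drops the u64_stats_sync sequence counter the kernel wraps around each pair (needed so 64-bit counters read consistently on 32-bit machines), and NR_CPUS plus the struct and function names here are illustrative only.

#include <stdint.h>
#include <stdio.h>

#define NR_CPUS 4	/* illustrative; the kernel iterates all possible CPUs */

struct pcpu_lstats {
	uint64_t packets;
	uint64_t bytes;
};

static struct pcpu_lstats lstats[NR_CPUS];

/* Transmit path: account the frame on the local CPU's counters only,
 * so there is no cross-CPU contention (mirrors dev_lstats_add()). */
static void lstats_add(int cpu, uint64_t len)
{
	lstats[cpu].packets++;
	lstats[cpu].bytes += len;
}

/* Stats path: fold every per-CPU pair into one view (mirrors
 * dev_lstats_read(), minus the u64_stats_sync retry loop). */
static void lstats_read(uint64_t *packets, uint64_t *bytes)
{
	*packets = 0;
	*bytes = 0;

	for (int cpu = 0; cpu < NR_CPUS; cpu++) {
		*packets += lstats[cpu].packets;
		*bytes += lstats[cpu].bytes;
	}
}

int main(void)
{
	uint64_t p, b;

	lstats_add(0, 1500);	/* frame looped back on CPU 0 */
	lstats_add(2, 60);	/* frame looped back on CPU 2 */

	lstats_read(&p, &b);
	/* loopback reports the same totals for rx and tx */
	printf("rx/tx packets=%llu, rx/tx bytes=%llu\n",
	       (unsigned long long)p, (unsigned long long)b);
	return 0;
}

Because every transmitted frame is also the received frame, a single counter pair per CPU is enough to report both directions.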
// SPDX-License-Identifier: GPL-2.0-or-later /* * rionet - Ethernet driver over RapidIO messaging services * * Copyright 2005 MontaVista Software, Inc. * Matt Porter <[email protected]> */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/dma-mapping.h> #include <linux/delay.h> #include <linux/rio.h> #include <linux/rio_drv.h> #include <linux/slab.h> #include <linux/rio_ids.h> #include <linux/netdevice.h> #include <linux/etherdevice.h> #include <linux/skbuff.h> #include <linux/crc32.h> #include <linux/ethtool.h> #include <linux/reboot.h> #define DRV_NAME "rionet" #define DRV_VERSION "0.3" #define DRV_AUTHOR "Matt Porter <[email protected]>" #define DRV_DESC "Ethernet over RapidIO" MODULE_AUTHOR(DRV_AUTHOR); MODULE_DESCRIPTION(DRV_DESC); MODULE_LICENSE("GPL"); #define RIONET_DEFAULT_MSGLEVEL \ (NETIF_MSG_DRV | \ NETIF_MSG_LINK | \ NETIF_MSG_RX_ERR | \ NETIF_MSG_TX_ERR) #define RIONET_DOORBELL_JOIN 0x1000 #define RIONET_DOORBELL_LEAVE 0x1001 #define RIONET_MAILBOX 0 #define RIONET_TX_RING_SIZE CONFIG_RIONET_TX_SIZE #define RIONET_RX_RING_SIZE CONFIG_RIONET_RX_SIZE #define RIONET_MAX_NETS 8 #define RIONET_MSG_SIZE RIO_MAX_MSG_SIZE #define RIONET_MAX_MTU (RIONET_MSG_SIZE - ETH_HLEN) struct rionet_private { struct rio_mport *mport; struct sk_buff *rx_skb[RIONET_RX_RING_SIZE]; struct sk_buff *tx_skb[RIONET_TX_RING_SIZE]; int rx_slot; int tx_slot; int tx_cnt; int ack_slot; spinlock_t lock; spinlock_t tx_lock; u32 msg_enable; bool open; }; struct rionet_peer { struct list_head node; struct rio_dev *rdev; struct resource *res; }; struct rionet_net { struct net_device *ndev; struct list_head peers; spinlock_t lock; /* net info access lock */ struct rio_dev **active; int nact; /* number of active peers */ }; static struct rionet_net nets[RIONET_MAX_NETS]; #define is_rionet_capable(src_ops, dst_ops) \ ((src_ops & RIO_SRC_OPS_DATA_MSG) && \ (dst_ops & RIO_DST_OPS_DATA_MSG) && \ (src_ops & RIO_SRC_OPS_DOORBELL) && \ (dst_ops & RIO_DST_OPS_DOORBELL)) #define dev_rionet_capable(dev) \ is_rionet_capable(dev->src_ops, dev->dst_ops) #define RIONET_MAC_MATCH(x) (!memcmp((x), "\00\01\00\01", 4)) #define RIONET_GET_DESTID(x) ((*((u8 *)x + 4) << 8) | *((u8 *)x + 5)) static int rionet_rx_clean(struct net_device *ndev) { int i; int error = 0; struct rionet_private *rnet = netdev_priv(ndev); void *data; i = rnet->rx_slot; do { if (!rnet->rx_skb[i]) continue; if (!(data = rio_get_inb_message(rnet->mport, RIONET_MAILBOX))) break; rnet->rx_skb[i]->data = data; skb_put(rnet->rx_skb[i], RIO_MAX_MSG_SIZE); rnet->rx_skb[i]->protocol = eth_type_trans(rnet->rx_skb[i], ndev); error = __netif_rx(rnet->rx_skb[i]); if (error == NET_RX_DROP) { ndev->stats.rx_dropped++; } else { ndev->stats.rx_packets++; ndev->stats.rx_bytes += RIO_MAX_MSG_SIZE; } } while ((i = (i + 1) % RIONET_RX_RING_SIZE) != rnet->rx_slot); return i; } static void rionet_rx_fill(struct net_device *ndev, int end) { int i; struct rionet_private *rnet = netdev_priv(ndev); i = rnet->rx_slot; do { rnet->rx_skb[i] = dev_alloc_skb(RIO_MAX_MSG_SIZE); if (!rnet->rx_skb[i]) break; rio_add_inb_buffer(rnet->mport, RIONET_MAILBOX, rnet->rx_skb[i]->data); } while ((i = (i + 1) % RIONET_RX_RING_SIZE) != end); rnet->rx_slot = i; } static int rionet_queue_tx_msg(struct sk_buff *skb, struct net_device *ndev, struct rio_dev *rdev) { struct rionet_private *rnet = netdev_priv(ndev); rio_add_outb_message(rnet->mport, rdev, 0, skb->data, skb->len); rnet->tx_skb[rnet->tx_slot] = skb; ndev->stats.tx_packets++; ndev->stats.tx_bytes += skb->len; if (++rnet->tx_cnt == 
RIONET_TX_RING_SIZE) netif_stop_queue(ndev); ++rnet->tx_slot; rnet->tx_slot &= (RIONET_TX_RING_SIZE - 1); if (netif_msg_tx_queued(rnet)) printk(KERN_INFO "%s: queued skb len %8.8x\n", DRV_NAME, skb->len); return 0; } static netdev_tx_t rionet_start_xmit(struct sk_buff *skb, struct net_device *ndev) { int i; struct rionet_private *rnet = netdev_priv(ndev); struct ethhdr *eth = (struct ethhdr *)skb->data; u16 destid; unsigned long flags; int add_num = 1; spin_lock_irqsave(&rnet->tx_lock, flags); if (is_multicast_ether_addr(eth->h_dest)) add_num = nets[rnet->mport->id].nact; if ((rnet->tx_cnt + add_num) > RIONET_TX_RING_SIZE) { netif_stop_queue(ndev); spin_unlock_irqrestore(&rnet->tx_lock, flags); printk(KERN_ERR "%s: BUG! Tx Ring full when queue awake!\n", ndev->name); return NETDEV_TX_BUSY; } if (is_multicast_ether_addr(eth->h_dest)) { int count = 0; for (i = 0; i < RIO_MAX_ROUTE_ENTRIES(rnet->mport->sys_size); i++) if (nets[rnet->mport->id].active[i]) { rionet_queue_tx_msg(skb, ndev, nets[rnet->mport->id].active[i]); if (count) refcount_inc(&skb->users); count++; } } else if (RIONET_MAC_MATCH(eth->h_dest)) { destid = RIONET_GET_DESTID(eth->h_dest); if (nets[rnet->mport->id].active[destid]) rionet_queue_tx_msg(skb, ndev, nets[rnet->mport->id].active[destid]); else { /* * If the target device was removed from the list of * active peers but we still have TX packets targeting * it just report sending a packet to the target * (without actual packet transfer). */ ndev->stats.tx_packets++; ndev->stats.tx_bytes += skb->len; dev_kfree_skb_any(skb); } } spin_unlock_irqrestore(&rnet->tx_lock, flags); return NETDEV_TX_OK; } static void rionet_dbell_event(struct rio_mport *mport, void *dev_id, u16 sid, u16 tid, u16 info) { struct net_device *ndev = dev_id; struct rionet_private *rnet = netdev_priv(ndev); struct rionet_peer *peer; unsigned char netid = rnet->mport->id; if (netif_msg_intr(rnet)) printk(KERN_INFO "%s: doorbell sid %4.4x tid %4.4x info %4.4x", DRV_NAME, sid, tid, info); if (info == RIONET_DOORBELL_JOIN) { if (!nets[netid].active[sid]) { spin_lock(&nets[netid].lock); list_for_each_entry(peer, &nets[netid].peers, node) { if (peer->rdev->destid == sid) { nets[netid].active[sid] = peer->rdev; nets[netid].nact++; } } spin_unlock(&nets[netid].lock); rio_mport_send_doorbell(mport, sid, RIONET_DOORBELL_JOIN); } } else if (info == RIONET_DOORBELL_LEAVE) { spin_lock(&nets[netid].lock); if (nets[netid].active[sid]) { nets[netid].active[sid] = NULL; nets[netid].nact--; } spin_unlock(&nets[netid].lock); } else { if (netif_msg_intr(rnet)) printk(KERN_WARNING "%s: unhandled doorbell\n", DRV_NAME); } } static void rionet_inb_msg_event(struct rio_mport *mport, void *dev_id, int mbox, int slot) { int n; struct net_device *ndev = dev_id; struct rionet_private *rnet = netdev_priv(ndev); if (netif_msg_intr(rnet)) printk(KERN_INFO "%s: inbound message event, mbox %d slot %d\n", DRV_NAME, mbox, slot); spin_lock(&rnet->lock); if ((n = rionet_rx_clean(ndev)) != rnet->rx_slot) rionet_rx_fill(ndev, n); spin_unlock(&rnet->lock); } static void rionet_outb_msg_event(struct rio_mport *mport, void *dev_id, int mbox, int slot) { struct net_device *ndev = dev_id; struct rionet_private *rnet = netdev_priv(ndev); spin_lock(&rnet->tx_lock); if (netif_msg_intr(rnet)) printk(KERN_INFO "%s: outbound message event, mbox %d slot %d\n", DRV_NAME, mbox, slot); while (rnet->tx_cnt && (rnet->ack_slot != slot)) { /* dma unmap single */ dev_kfree_skb_irq(rnet->tx_skb[rnet->ack_slot]); rnet->tx_skb[rnet->ack_slot] = NULL; 
++rnet->ack_slot; rnet->ack_slot &= (RIONET_TX_RING_SIZE - 1); rnet->tx_cnt--; } if (rnet->tx_cnt < RIONET_TX_RING_SIZE) netif_wake_queue(ndev); spin_unlock(&rnet->tx_lock); } static int rionet_open(struct net_device *ndev) { int i, rc = 0; struct rionet_peer *peer; struct rionet_private *rnet = netdev_priv(ndev); unsigned char netid = rnet->mport->id; unsigned long flags; if (netif_msg_ifup(rnet)) printk(KERN_INFO "%s: open\n", DRV_NAME); if ((rc = rio_request_inb_dbell(rnet->mport, (void *)ndev, RIONET_DOORBELL_JOIN, RIONET_DOORBELL_LEAVE, rionet_dbell_event)) < 0) goto out; if ((rc = rio_request_inb_mbox(rnet->mport, (void *)ndev, RIONET_MAILBOX, RIONET_RX_RING_SIZE, rionet_inb_msg_event)) < 0) goto out; if ((rc = rio_request_outb_mbox(rnet->mport, (void *)ndev, RIONET_MAILBOX, RIONET_TX_RING_SIZE, rionet_outb_msg_event)) < 0) goto out; /* Initialize inbound message ring */ for (i = 0; i < RIONET_RX_RING_SIZE; i++) rnet->rx_skb[i] = NULL; rnet->rx_slot = 0; rionet_rx_fill(ndev, 0); rnet->tx_slot = 0; rnet->tx_cnt = 0; rnet->ack_slot = 0; netif_carrier_on(ndev); netif_start_queue(ndev); spin_lock_irqsave(&nets[netid].lock, flags); list_for_each_entry(peer, &nets[netid].peers, node) { /* Send a join message */ rio_send_doorbell(peer->rdev, RIONET_DOORBELL_JOIN); } spin_unlock_irqrestore(&nets[netid].lock, flags); rnet->open = true; out: return rc; } static int rionet_close(struct net_device *ndev) { struct rionet_private *rnet = netdev_priv(ndev); struct rionet_peer *peer; unsigned char netid = rnet->mport->id; unsigned long flags; int i; if (netif_msg_ifup(rnet)) printk(KERN_INFO "%s: close %s\n", DRV_NAME, ndev->name); netif_stop_queue(ndev); netif_carrier_off(ndev); rnet->open = false; for (i = 0; i < RIONET_RX_RING_SIZE; i++) kfree_skb(rnet->rx_skb[i]); spin_lock_irqsave(&nets[netid].lock, flags); list_for_each_entry(peer, &nets[netid].peers, node) { if (nets[netid].active[peer->rdev->destid]) { rio_send_doorbell(peer->rdev, RIONET_DOORBELL_LEAVE); nets[netid].active[peer->rdev->destid] = NULL; } if (peer->res) rio_release_outb_dbell(peer->rdev, peer->res); } spin_unlock_irqrestore(&nets[netid].lock, flags); rio_release_inb_dbell(rnet->mport, RIONET_DOORBELL_JOIN, RIONET_DOORBELL_LEAVE); rio_release_inb_mbox(rnet->mport, RIONET_MAILBOX); rio_release_outb_mbox(rnet->mport, RIONET_MAILBOX); return 0; } static void rionet_remove_dev(struct device *dev, struct subsys_interface *sif) { struct rio_dev *rdev = to_rio_dev(dev); unsigned char netid = rdev->net->hport->id; struct rionet_peer *peer; int state, found = 0; unsigned long flags; if (!dev_rionet_capable(rdev)) return; spin_lock_irqsave(&nets[netid].lock, flags); list_for_each_entry(peer, &nets[netid].peers, node) { if (peer->rdev == rdev) { list_del(&peer->node); if (nets[netid].active[rdev->destid]) { state = atomic_read(&rdev->state); if (state != RIO_DEVICE_GONE && state != RIO_DEVICE_INITIALIZING) { rio_send_doorbell(rdev, RIONET_DOORBELL_LEAVE); } nets[netid].active[rdev->destid] = NULL; nets[netid].nact--; } found = 1; break; } } spin_unlock_irqrestore(&nets[netid].lock, flags); if (found) { if (peer->res) rio_release_outb_dbell(rdev, peer->res); kfree(peer); } } static void rionet_get_drvinfo(struct net_device *ndev, struct ethtool_drvinfo *info) { struct rionet_private *rnet = netdev_priv(ndev); strscpy(info->driver, DRV_NAME, sizeof(info->driver)); strscpy(info->version, DRV_VERSION, sizeof(info->version)); strscpy(info->fw_version, "n/a", sizeof(info->fw_version)); strscpy(info->bus_info, rnet->mport->name, 
sizeof(info->bus_info)); } static u32 rionet_get_msglevel(struct net_device *ndev) { struct rionet_private *rnet = netdev_priv(ndev); return rnet->msg_enable; } static void rionet_set_msglevel(struct net_device *ndev, u32 value) { struct rionet_private *rnet = netdev_priv(ndev); rnet->msg_enable = value; } static const struct ethtool_ops rionet_ethtool_ops = { .get_drvinfo = rionet_get_drvinfo, .get_msglevel = rionet_get_msglevel, .set_msglevel = rionet_set_msglevel, .get_link = ethtool_op_get_link, }; static const struct net_device_ops rionet_netdev_ops = { .ndo_open = rionet_open, .ndo_stop = rionet_close, .ndo_start_xmit = rionet_start_xmit, .ndo_validate_addr = eth_validate_addr, .ndo_set_mac_address = eth_mac_addr, }; static int rionet_setup_netdev(struct rio_mport *mport, struct net_device *ndev) { int rc = 0; struct rionet_private *rnet; u8 addr[ETH_ALEN]; u16 device_id; const size_t rionet_active_bytes = sizeof(void *) * RIO_MAX_ROUTE_ENTRIES(mport->sys_size); nets[mport->id].active = (struct rio_dev **)__get_free_pages(GFP_KERNEL, get_order(rionet_active_bytes)); if (!nets[mport->id].active) { rc = -ENOMEM; goto out; } memset((void *)nets[mport->id].active, 0, rionet_active_bytes); /* Set up private area */ rnet = netdev_priv(ndev); rnet->mport = mport; rnet->open = false; /* Set the default MAC address */ device_id = rio_local_get_device_id(mport); addr[0] = 0x00; addr[1] = 0x01; addr[2] = 0x00; addr[3] = 0x01; addr[4] = device_id >> 8; addr[5] = device_id & 0xff; eth_hw_addr_set(ndev, addr); ndev->netdev_ops = &rionet_netdev_ops; ndev->mtu = RIONET_MAX_MTU; /* MTU range: 68 - 4082 */ ndev->min_mtu = ETH_MIN_MTU; ndev->max_mtu = RIONET_MAX_MTU; ndev->features = NETIF_F_LLTX; SET_NETDEV_DEV(ndev, &mport->dev); ndev->ethtool_ops = &rionet_ethtool_ops; spin_lock_init(&rnet->lock); spin_lock_init(&rnet->tx_lock); rnet->msg_enable = RIONET_DEFAULT_MSGLEVEL; rc = register_netdev(ndev); if (rc != 0) { free_pages((unsigned long)nets[mport->id].active, get_order(rionet_active_bytes)); goto out; } printk(KERN_INFO "%s: %s %s Version %s, MAC %pM, %s\n", ndev->name, DRV_NAME, DRV_DESC, DRV_VERSION, ndev->dev_addr, mport->name); out: return rc; } static int rionet_add_dev(struct device *dev, struct subsys_interface *sif) { int rc = -ENODEV; u32 lsrc_ops, ldst_ops; struct rionet_peer *peer; struct net_device *ndev = NULL; struct rio_dev *rdev = to_rio_dev(dev); unsigned char netid = rdev->net->hport->id; if (netid >= RIONET_MAX_NETS) return rc; /* * If first time through this net, make sure local device is rionet * capable and setup netdev (this step will be skipped in later probes * on the same net). */ if (!nets[netid].ndev) { rio_local_read_config_32(rdev->net->hport, RIO_SRC_OPS_CAR, &lsrc_ops); rio_local_read_config_32(rdev->net->hport, RIO_DST_OPS_CAR, &ldst_ops); if (!is_rionet_capable(lsrc_ops, ldst_ops)) { printk(KERN_ERR "%s: local device %s is not network capable\n", DRV_NAME, rdev->net->hport->name); goto out; } /* Allocate our net_device structure */ ndev = alloc_etherdev(sizeof(struct rionet_private)); if (ndev == NULL) { rc = -ENOMEM; goto out; } rc = rionet_setup_netdev(rdev->net->hport, ndev); if (rc) { printk(KERN_ERR "%s: failed to setup netdev (rc=%d)\n", DRV_NAME, rc); free_netdev(ndev); goto out; } INIT_LIST_HEAD(&nets[netid].peers); spin_lock_init(&nets[netid].lock); nets[netid].nact = 0; nets[netid].ndev = ndev; } /* * If the remote device has mailbox/doorbell capabilities, * add it to the peer list. 
*/ if (dev_rionet_capable(rdev)) { struct rionet_private *rnet; unsigned long flags; rnet = netdev_priv(nets[netid].ndev); peer = kzalloc(sizeof(*peer), GFP_KERNEL); if (!peer) { rc = -ENOMEM; goto out; } peer->rdev = rdev; peer->res = rio_request_outb_dbell(peer->rdev, RIONET_DOORBELL_JOIN, RIONET_DOORBELL_LEAVE); if (!peer->res) { pr_err("%s: error requesting doorbells\n", DRV_NAME); kfree(peer); rc = -ENOMEM; goto out; } spin_lock_irqsave(&nets[netid].lock, flags); list_add_tail(&peer->node, &nets[netid].peers); spin_unlock_irqrestore(&nets[netid].lock, flags); pr_debug("%s: %s add peer %s\n", DRV_NAME, __func__, rio_name(rdev)); /* If netdev is already opened, send join request to new peer */ if (rnet->open) rio_send_doorbell(peer->rdev, RIONET_DOORBELL_JOIN); } return 0; out: return rc; } static int rionet_shutdown(struct notifier_block *nb, unsigned long code, void *unused) { struct rionet_peer *peer; unsigned long flags; int i; pr_debug("%s: %s\n", DRV_NAME, __func__); for (i = 0; i < RIONET_MAX_NETS; i++) { if (!nets[i].ndev) continue; spin_lock_irqsave(&nets[i].lock, flags); list_for_each_entry(peer, &nets[i].peers, node) { if (nets[i].active[peer->rdev->destid]) { rio_send_doorbell(peer->rdev, RIONET_DOORBELL_LEAVE); nets[i].active[peer->rdev->destid] = NULL; } } spin_unlock_irqrestore(&nets[i].lock, flags); } return NOTIFY_DONE; } static void rionet_remove_mport(struct device *dev) { struct rio_mport *mport = to_rio_mport(dev); struct net_device *ndev; int id = mport->id; pr_debug("%s %s\n", __func__, mport->name); WARN(nets[id].nact, "%s called when connected to %d peers\n", __func__, nets[id].nact); WARN(!nets[id].ndev, "%s called for mport without NDEV\n", __func__); if (nets[id].ndev) { ndev = nets[id].ndev; netif_stop_queue(ndev); unregister_netdev(ndev); free_pages((unsigned long)nets[id].active, get_order(sizeof(void *) * RIO_MAX_ROUTE_ENTRIES(mport->sys_size))); nets[id].active = NULL; free_netdev(ndev); nets[id].ndev = NULL; } } #ifdef MODULE static struct rio_device_id rionet_id_table[] = { {RIO_DEVICE(RIO_ANY_ID, RIO_ANY_ID)}, { 0, } /* terminate list */ }; MODULE_DEVICE_TABLE(rapidio, rionet_id_table); #endif static struct subsys_interface rionet_interface = { .name = "rionet", .subsys = &rio_bus_type, .add_dev = rionet_add_dev, .remove_dev = rionet_remove_dev, }; static struct notifier_block rionet_notifier = { .notifier_call = rionet_shutdown, }; /* the rio_mport_interface is used to handle local mport devices */ static struct class_interface rio_mport_interface __refdata = { .class = &rio_mport_class, .add_dev = NULL, .remove_dev = rionet_remove_mport, }; static int __init rionet_init(void) { int ret; ret = register_reboot_notifier(&rionet_notifier); if (ret) { pr_err("%s: failed to register reboot notifier (err=%d)\n", DRV_NAME, ret); return ret; } ret = class_interface_register(&rio_mport_interface); if (ret) { pr_err("%s: class_interface_register error: %d\n", DRV_NAME, ret); return ret; } return subsys_interface_register(&rionet_interface); } static void __exit rionet_exit(void) { unregister_reboot_notifier(&rionet_notifier); subsys_interface_unregister(&rionet_interface); class_interface_unregister(&rio_mport_interface); } late_initcall(rionet_init); module_exit(rionet_exit);
linux-master
drivers/net/rionet.c
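rionet_queue_tx_msg() and rionet_outb_msg_event() above manage the TX ring with a power-of-two size: the slot index wraps via "& (RIONET_TX_RING_SIZE - 1)", tx_cnt counts in-flight slots, the queue is stopped when the ring fills, and completions drain the ring and wake it again. The standalone sketch below models only that accounting; the fixed ring size of 16 and the struct/function names are assumptions made for the example (the driver takes its ring sizes from Kconfig and moves real skbs).

#include <stdbool.h>
#include <stdio.h>

#define TX_RING_SIZE 16	/* must be a power of two for the mask to work */

struct tx_ring {
	int tx_slot;	/* next slot to fill */
	int ack_slot;	/* next slot the hardware will complete */
	int tx_cnt;	/* slots currently in flight */
	bool stopped;	/* mirrors netif_stop_queue()/netif_wake_queue() */
};

static bool tx_queue(struct tx_ring *r)
{
	if (r->tx_cnt == TX_RING_SIZE)
		return false;		/* caller should have stopped earlier */

	if (++r->tx_cnt == TX_RING_SIZE)
		r->stopped = true;	/* ring is now full: stop the queue */

	++r->tx_slot;
	r->tx_slot &= (TX_RING_SIZE - 1);
	return true;
}

static void tx_complete(struct tx_ring *r, int completed_up_to)
{
	/* walk ack_slot forward until it catches up with the hardware */
	while (r->tx_cnt && r->ack_slot != completed_up_to) {
		++r->ack_slot;
		r->ack_slot &= (TX_RING_SIZE - 1);
		r->tx_cnt--;
	}

	if (r->tx_cnt < TX_RING_SIZE)
		r->stopped = false;	/* room again: wake the queue */
}

int main(void)
{
	struct tx_ring r = { 0 };

	for (int i = 0; i < TX_RING_SIZE; i++)
		tx_queue(&r);
	printf("after fill: cnt=%d stopped=%d\n", r.tx_cnt, r.stopped);

	tx_complete(&r, 8);		/* hardware acked slots 0..7 */
	printf("after ack:  cnt=%d stopped=%d slot=%d ack=%d\n",
	       r.tx_cnt, r.stopped, r.tx_slot, r.ack_slot);
	return 0;
}

Keeping a separate in-flight count, rather than comparing the two indices, is what keeps the completely-full and completely-empty ring states distinguishable.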
// SPDX-License-Identifier: GPL-2.0-only /* * Driver for NXP MCR20A 802.15.4 Wireless-PAN Networking controller * * Copyright (C) 2018 Xue Liu <[email protected]> */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/gpio/consumer.h> #include <linux/spi/spi.h> #include <linux/workqueue.h> #include <linux/interrupt.h> #include <linux/irq.h> #include <linux/skbuff.h> #include <linux/of_gpio.h> #include <linux/regmap.h> #include <linux/ieee802154.h> #include <linux/debugfs.h> #include <net/mac802154.h> #include <net/cfg802154.h> #include <linux/device.h> #include "mcr20a.h" #define SPI_COMMAND_BUFFER 3 #define REGISTER_READ BIT(7) #define REGISTER_WRITE (0 << 7) #define REGISTER_ACCESS (0 << 6) #define PACKET_BUFF_BURST_ACCESS BIT(6) #define PACKET_BUFF_BYTE_ACCESS BIT(5) #define MCR20A_WRITE_REG(x) (x) #define MCR20A_READ_REG(x) (REGISTER_READ | (x)) #define MCR20A_BURST_READ_PACKET_BUF (0xC0) #define MCR20A_BURST_WRITE_PACKET_BUF (0x40) #define MCR20A_CMD_REG 0x80 #define MCR20A_CMD_REG_MASK 0x3f #define MCR20A_CMD_WRITE 0x40 #define MCR20A_CMD_FB 0x20 /* Number of Interrupt Request Status Register */ #define MCR20A_IRQSTS_NUM 2 /* only IRQ_STS1 and IRQ_STS2 */ /* MCR20A CCA Type */ enum { MCR20A_CCA_ED, // energy detect - CCA bit not active, // not to be used for T and CCCA sequences MCR20A_CCA_MODE1, // energy detect - CCA bit ACTIVE MCR20A_CCA_MODE2, // 802.15.4 compliant signal detect - CCA bit ACTIVE MCR20A_CCA_MODE3 }; enum { MCR20A_XCVSEQ_IDLE = 0x00, MCR20A_XCVSEQ_RX = 0x01, MCR20A_XCVSEQ_TX = 0x02, MCR20A_XCVSEQ_CCA = 0x03, MCR20A_XCVSEQ_TR = 0x04, MCR20A_XCVSEQ_CCCA = 0x05, }; /* IEEE-802.15.4 defined constants (2.4 GHz logical channels) */ #define MCR20A_MIN_CHANNEL (11) #define MCR20A_MAX_CHANNEL (26) #define MCR20A_CHANNEL_SPACING (5) /* MCR20A CCA Threshold constans */ #define MCR20A_MIN_CCA_THRESHOLD (0x6EU) #define MCR20A_MAX_CCA_THRESHOLD (0x00U) /* version 0C */ #define MCR20A_OVERWRITE_VERSION (0x0C) /* MCR20A PLL configurations */ static const u8 PLL_INT[16] = { /* 2405 */ 0x0B, /* 2410 */ 0x0B, /* 2415 */ 0x0B, /* 2420 */ 0x0B, /* 2425 */ 0x0B, /* 2430 */ 0x0B, /* 2435 */ 0x0C, /* 2440 */ 0x0C, /* 2445 */ 0x0C, /* 2450 */ 0x0C, /* 2455 */ 0x0C, /* 2460 */ 0x0C, /* 2465 */ 0x0D, /* 2470 */ 0x0D, /* 2475 */ 0x0D, /* 2480 */ 0x0D }; static const u8 PLL_FRAC[16] = { /* 2405 */ 0x28, /* 2410 */ 0x50, /* 2415 */ 0x78, /* 2420 */ 0xA0, /* 2425 */ 0xC8, /* 2430 */ 0xF0, /* 2435 */ 0x18, /* 2440 */ 0x40, /* 2445 */ 0x68, /* 2450 */ 0x90, /* 2455 */ 0xB8, /* 2460 */ 0xE0, /* 2465 */ 0x08, /* 2470 */ 0x30, /* 2475 */ 0x58, /* 2480 */ 0x80 }; static const struct reg_sequence mar20a_iar_overwrites[] = { { IAR_MISC_PAD_CTRL, 0x02 }, { IAR_VCO_CTRL1, 0xB3 }, { IAR_VCO_CTRL2, 0x07 }, { IAR_PA_TUNING, 0x71 }, { IAR_CHF_IBUF, 0x2F }, { IAR_CHF_QBUF, 0x2F }, { IAR_CHF_IRIN, 0x24 }, { IAR_CHF_QRIN, 0x24 }, { IAR_CHF_IL, 0x24 }, { IAR_CHF_QL, 0x24 }, { IAR_CHF_CC1, 0x32 }, { IAR_CHF_CCL, 0x1D }, { IAR_CHF_CC2, 0x2D }, { IAR_CHF_IROUT, 0x24 }, { IAR_CHF_QROUT, 0x24 }, { IAR_PA_CAL, 0x28 }, { IAR_AGC_THR1, 0x55 }, { IAR_AGC_THR2, 0x2D }, { IAR_ATT_RSSI1, 0x5F }, { IAR_ATT_RSSI2, 0x8F }, { IAR_RSSI_OFFSET, 0x61 }, { IAR_CHF_PMA_GAIN, 0x03 }, { IAR_CCA1_THRESH, 0x50 }, { IAR_CORR_NVAL, 0x13 }, { IAR_ACKDELAY, 0x3D }, }; #define MCR20A_VALID_CHANNELS (0x07FFF800) #define MCR20A_MAX_BUF (127) #define printdev(X) (&X->spi->dev) /* regmap information for Direct Access Register (DAR) access */ #define MCR20A_DAR_WRITE 0x01 #define MCR20A_DAR_READ 0x00 #define MCR20A_DAR_NUMREGS 0x3F 
/* regmap information for Indirect Access Register (IAR) access */ #define MCR20A_IAR_ACCESS 0x80 #define MCR20A_IAR_NUMREGS 0xBEFF /* Read/Write SPI Commands for DAR and IAR registers. */ #define MCR20A_READSHORT(reg) ((reg) << 1) #define MCR20A_WRITESHORT(reg) ((reg) << 1 | 1) #define MCR20A_READLONG(reg) (1 << 15 | (reg) << 5) #define MCR20A_WRITELONG(reg) (1 << 15 | (reg) << 5 | 1 << 4) /* Type definitions for link configuration of instantiable layers */ #define MCR20A_PHY_INDIRECT_QUEUE_SIZE (12) static bool mcr20a_dar_writeable(struct device *dev, unsigned int reg) { switch (reg) { case DAR_IRQ_STS1: case DAR_IRQ_STS2: case DAR_IRQ_STS3: case DAR_PHY_CTRL1: case DAR_PHY_CTRL2: case DAR_PHY_CTRL3: case DAR_PHY_CTRL4: case DAR_SRC_CTRL: case DAR_SRC_ADDRS_SUM_LSB: case DAR_SRC_ADDRS_SUM_MSB: case DAR_T3CMP_LSB: case DAR_T3CMP_MSB: case DAR_T3CMP_USB: case DAR_T2PRIMECMP_LSB: case DAR_T2PRIMECMP_MSB: case DAR_T1CMP_LSB: case DAR_T1CMP_MSB: case DAR_T1CMP_USB: case DAR_T2CMP_LSB: case DAR_T2CMP_MSB: case DAR_T2CMP_USB: case DAR_T4CMP_LSB: case DAR_T4CMP_MSB: case DAR_T4CMP_USB: case DAR_PLL_INT0: case DAR_PLL_FRAC0_LSB: case DAR_PLL_FRAC0_MSB: case DAR_PA_PWR: /* no DAR_ACM */ case DAR_OVERWRITE_VER: case DAR_CLK_OUT_CTRL: case DAR_PWR_MODES: return true; default: return false; } } static bool mcr20a_dar_readable(struct device *dev, unsigned int reg) { bool rc; /* all writeable are also readable */ rc = mcr20a_dar_writeable(dev, reg); if (rc) return rc; /* readonly regs */ switch (reg) { case DAR_RX_FRM_LEN: case DAR_CCA1_ED_FNL: case DAR_EVENT_TMR_LSB: case DAR_EVENT_TMR_MSB: case DAR_EVENT_TMR_USB: case DAR_TIMESTAMP_LSB: case DAR_TIMESTAMP_MSB: case DAR_TIMESTAMP_USB: case DAR_SEQ_STATE: case DAR_LQI_VALUE: case DAR_RSSI_CCA_CONT: return true; default: return false; } } static bool mcr20a_dar_volatile(struct device *dev, unsigned int reg) { /* can be changed during runtime */ switch (reg) { case DAR_IRQ_STS1: case DAR_IRQ_STS2: case DAR_IRQ_STS3: /* use them in spi_async and regmap so it's volatile */ return true; default: return false; } } static bool mcr20a_dar_precious(struct device *dev, unsigned int reg) { /* don't clear irq line on read */ switch (reg) { case DAR_IRQ_STS1: case DAR_IRQ_STS2: case DAR_IRQ_STS3: return true; default: return false; } } static const struct regmap_config mcr20a_dar_regmap = { .name = "mcr20a_dar", .reg_bits = 8, .val_bits = 8, .write_flag_mask = REGISTER_ACCESS | REGISTER_WRITE, .read_flag_mask = REGISTER_ACCESS | REGISTER_READ, .cache_type = REGCACHE_RBTREE, .writeable_reg = mcr20a_dar_writeable, .readable_reg = mcr20a_dar_readable, .volatile_reg = mcr20a_dar_volatile, .precious_reg = mcr20a_dar_precious, .fast_io = true, .can_multi_write = true, }; static bool mcr20a_iar_writeable(struct device *dev, unsigned int reg) { switch (reg) { case IAR_XTAL_TRIM: case IAR_PMC_LP_TRIM: case IAR_MACPANID0_LSB: case IAR_MACPANID0_MSB: case IAR_MACSHORTADDRS0_LSB: case IAR_MACSHORTADDRS0_MSB: case IAR_MACLONGADDRS0_0: case IAR_MACLONGADDRS0_8: case IAR_MACLONGADDRS0_16: case IAR_MACLONGADDRS0_24: case IAR_MACLONGADDRS0_32: case IAR_MACLONGADDRS0_40: case IAR_MACLONGADDRS0_48: case IAR_MACLONGADDRS0_56: case IAR_RX_FRAME_FILTER: case IAR_PLL_INT1: case IAR_PLL_FRAC1_LSB: case IAR_PLL_FRAC1_MSB: case IAR_MACPANID1_LSB: case IAR_MACPANID1_MSB: case IAR_MACSHORTADDRS1_LSB: case IAR_MACSHORTADDRS1_MSB: case IAR_MACLONGADDRS1_0: case IAR_MACLONGADDRS1_8: case IAR_MACLONGADDRS1_16: case IAR_MACLONGADDRS1_24: case IAR_MACLONGADDRS1_32: case IAR_MACLONGADDRS1_40: case 
IAR_MACLONGADDRS1_48: case IAR_MACLONGADDRS1_56: case IAR_DUAL_PAN_CTRL: case IAR_DUAL_PAN_DWELL: case IAR_CCA1_THRESH: case IAR_CCA1_ED_OFFSET_COMP: case IAR_LQI_OFFSET_COMP: case IAR_CCA_CTRL: case IAR_CCA2_CORR_PEAKS: case IAR_CCA2_CORR_THRESH: case IAR_TMR_PRESCALE: case IAR_ANT_PAD_CTRL: case IAR_MISC_PAD_CTRL: case IAR_BSM_CTRL: case IAR_RNG: case IAR_RX_WTR_MARK: case IAR_SOFT_RESET: case IAR_TXDELAY: case IAR_ACKDELAY: case IAR_CORR_NVAL: case IAR_ANT_AGC_CTRL: case IAR_AGC_THR1: case IAR_AGC_THR2: case IAR_PA_CAL: case IAR_ATT_RSSI1: case IAR_ATT_RSSI2: case IAR_RSSI_OFFSET: case IAR_XTAL_CTRL: case IAR_CHF_PMA_GAIN: case IAR_CHF_IBUF: case IAR_CHF_QBUF: case IAR_CHF_IRIN: case IAR_CHF_QRIN: case IAR_CHF_IL: case IAR_CHF_QL: case IAR_CHF_CC1: case IAR_CHF_CCL: case IAR_CHF_CC2: case IAR_CHF_IROUT: case IAR_CHF_QROUT: case IAR_PA_TUNING: case IAR_VCO_CTRL1: case IAR_VCO_CTRL2: return true; default: return false; } } static bool mcr20a_iar_readable(struct device *dev, unsigned int reg) { bool rc; /* all writeable are also readable */ rc = mcr20a_iar_writeable(dev, reg); if (rc) return rc; /* readonly regs */ switch (reg) { case IAR_PART_ID: case IAR_DUAL_PAN_STS: case IAR_RX_BYTE_COUNT: case IAR_FILTERFAIL_CODE1: case IAR_FILTERFAIL_CODE2: case IAR_RSSI: return true; default: return false; } } static bool mcr20a_iar_volatile(struct device *dev, unsigned int reg) { /* can be changed during runtime */ switch (reg) { case IAR_DUAL_PAN_STS: case IAR_RX_BYTE_COUNT: case IAR_FILTERFAIL_CODE1: case IAR_FILTERFAIL_CODE2: case IAR_RSSI: return true; default: return false; } } static const struct regmap_config mcr20a_iar_regmap = { .name = "mcr20a_iar", .reg_bits = 16, .val_bits = 8, .write_flag_mask = REGISTER_ACCESS | REGISTER_WRITE | IAR_INDEX, .read_flag_mask = REGISTER_ACCESS | REGISTER_READ | IAR_INDEX, .cache_type = REGCACHE_RBTREE, .writeable_reg = mcr20a_iar_writeable, .readable_reg = mcr20a_iar_readable, .volatile_reg = mcr20a_iar_volatile, .fast_io = true, }; struct mcr20a_local { struct spi_device *spi; struct ieee802154_hw *hw; struct regmap *regmap_dar; struct regmap *regmap_iar; u8 *buf; bool is_tx; /* for writing tx buffer */ struct spi_message tx_buf_msg; u8 tx_header[1]; /* burst buffer write command */ struct spi_transfer tx_xfer_header; u8 tx_len[1]; /* len of tx packet */ struct spi_transfer tx_xfer_len; /* data of tx packet */ struct spi_transfer tx_xfer_buf; struct sk_buff *tx_skb; /* for read length rxfifo */ struct spi_message reg_msg; u8 reg_cmd[1]; u8 reg_data[MCR20A_IRQSTS_NUM]; struct spi_transfer reg_xfer_cmd; struct spi_transfer reg_xfer_data; /* receive handling */ struct spi_message rx_buf_msg; u8 rx_header[1]; struct spi_transfer rx_xfer_header; u8 rx_lqi[1]; struct spi_transfer rx_xfer_lqi; u8 rx_buf[MCR20A_MAX_BUF]; struct spi_transfer rx_xfer_buf; /* isr handling for reading intstat */ struct spi_message irq_msg; u8 irq_header[1]; u8 irq_data[MCR20A_IRQSTS_NUM]; struct spi_transfer irq_xfer_data; struct spi_transfer irq_xfer_header; }; static void mcr20a_write_tx_buf_complete(void *context) { struct mcr20a_local *lp = context; int ret; dev_dbg(printdev(lp), "%s\n", __func__); lp->reg_msg.complete = NULL; lp->reg_cmd[0] = MCR20A_WRITE_REG(DAR_PHY_CTRL1); lp->reg_data[0] = MCR20A_XCVSEQ_TX; lp->reg_xfer_data.len = 1; ret = spi_async(lp->spi, &lp->reg_msg); if (ret) dev_err(printdev(lp), "failed to set SEQ TX\n"); } static int mcr20a_xmit(struct ieee802154_hw *hw, struct sk_buff *skb) { struct mcr20a_local *lp = hw->priv; dev_dbg(printdev(lp), "%s\n", 
__func__); lp->tx_skb = skb; print_hex_dump_debug("mcr20a tx: ", DUMP_PREFIX_OFFSET, 16, 1, skb->data, skb->len, 0); lp->is_tx = 1; lp->reg_msg.complete = NULL; lp->reg_cmd[0] = MCR20A_WRITE_REG(DAR_PHY_CTRL1); lp->reg_data[0] = MCR20A_XCVSEQ_IDLE; lp->reg_xfer_data.len = 1; return spi_async(lp->spi, &lp->reg_msg); } static int mcr20a_ed(struct ieee802154_hw *hw, u8 *level) { WARN_ON(!level); *level = 0xbe; return 0; } static int mcr20a_set_channel(struct ieee802154_hw *hw, u8 page, u8 channel) { struct mcr20a_local *lp = hw->priv; int ret; dev_dbg(printdev(lp), "%s\n", __func__); /* freqency = ((PLL_INT+64) + (PLL_FRAC/65536)) * 32 MHz */ ret = regmap_write(lp->regmap_dar, DAR_PLL_INT0, PLL_INT[channel - 11]); if (ret) return ret; ret = regmap_write(lp->regmap_dar, DAR_PLL_FRAC0_LSB, 0x00); if (ret) return ret; ret = regmap_write(lp->regmap_dar, DAR_PLL_FRAC0_MSB, PLL_FRAC[channel - 11]); if (ret) return ret; return 0; } static int mcr20a_start(struct ieee802154_hw *hw) { struct mcr20a_local *lp = hw->priv; int ret; dev_dbg(printdev(lp), "%s\n", __func__); /* No slotted operation */ dev_dbg(printdev(lp), "no slotted operation\n"); ret = regmap_update_bits(lp->regmap_dar, DAR_PHY_CTRL1, DAR_PHY_CTRL1_SLOTTED, 0x0); if (ret < 0) return ret; /* enable irq */ enable_irq(lp->spi->irq); /* Unmask SEQ interrupt */ ret = regmap_update_bits(lp->regmap_dar, DAR_PHY_CTRL2, DAR_PHY_CTRL2_SEQMSK, 0x0); if (ret < 0) return ret; /* Start the RX sequence */ dev_dbg(printdev(lp), "start the RX sequence\n"); ret = regmap_update_bits(lp->regmap_dar, DAR_PHY_CTRL1, DAR_PHY_CTRL1_XCVSEQ_MASK, MCR20A_XCVSEQ_RX); if (ret < 0) return ret; return 0; } static void mcr20a_stop(struct ieee802154_hw *hw) { struct mcr20a_local *lp = hw->priv; dev_dbg(printdev(lp), "%s\n", __func__); /* stop all running sequence */ regmap_update_bits(lp->regmap_dar, DAR_PHY_CTRL1, DAR_PHY_CTRL1_XCVSEQ_MASK, MCR20A_XCVSEQ_IDLE); /* disable irq */ disable_irq(lp->spi->irq); } static int mcr20a_set_hw_addr_filt(struct ieee802154_hw *hw, struct ieee802154_hw_addr_filt *filt, unsigned long changed) { struct mcr20a_local *lp = hw->priv; dev_dbg(printdev(lp), "%s\n", __func__); if (changed & IEEE802154_AFILT_SADDR_CHANGED) { u16 addr = le16_to_cpu(filt->short_addr); regmap_write(lp->regmap_iar, IAR_MACSHORTADDRS0_LSB, addr); regmap_write(lp->regmap_iar, IAR_MACSHORTADDRS0_MSB, addr >> 8); } if (changed & IEEE802154_AFILT_PANID_CHANGED) { u16 pan = le16_to_cpu(filt->pan_id); regmap_write(lp->regmap_iar, IAR_MACPANID0_LSB, pan); regmap_write(lp->regmap_iar, IAR_MACPANID0_MSB, pan >> 8); } if (changed & IEEE802154_AFILT_IEEEADDR_CHANGED) { u8 addr[8], i; memcpy(addr, &filt->ieee_addr, 8); for (i = 0; i < 8; i++) regmap_write(lp->regmap_iar, IAR_MACLONGADDRS0_0 + i, addr[i]); } if (changed & IEEE802154_AFILT_PANC_CHANGED) { if (filt->pan_coord) { regmap_update_bits(lp->regmap_dar, DAR_PHY_CTRL4, DAR_PHY_CTRL4_PANCORDNTR0, 0x10); } else { regmap_update_bits(lp->regmap_dar, DAR_PHY_CTRL4, DAR_PHY_CTRL4_PANCORDNTR0, 0x00); } } return 0; } /* -30 dBm to 10 dBm */ #define MCR20A_MAX_TX_POWERS 0x14 static const s32 mcr20a_powers[MCR20A_MAX_TX_POWERS + 1] = { -3000, -2800, -2600, -2400, -2200, -2000, -1800, -1600, -1400, -1200, -1000, -800, -600, -400, -200, 0, 200, 400, 600, 800, 1000 }; static int mcr20a_set_txpower(struct ieee802154_hw *hw, s32 mbm) { struct mcr20a_local *lp = hw->priv; u32 i; dev_dbg(printdev(lp), "%s(%d)\n", __func__, mbm); for (i = 0; i < lp->hw->phy->supported.tx_powers_size; i++) { if (lp->hw->phy->supported.tx_powers[i] == mbm) 
return regmap_write(lp->regmap_dar, DAR_PA_PWR, ((i + 8) & 0x1F)); } return -EINVAL; } #define MCR20A_MAX_ED_LEVELS MCR20A_MIN_CCA_THRESHOLD static s32 mcr20a_ed_levels[MCR20A_MAX_ED_LEVELS + 1]; static int mcr20a_set_cca_mode(struct ieee802154_hw *hw, const struct wpan_phy_cca *cca) { struct mcr20a_local *lp = hw->priv; unsigned int cca_mode = 0xff; bool cca_mode_and = false; int ret; dev_dbg(printdev(lp), "%s\n", __func__); /* mapping 802.15.4 to driver spec */ switch (cca->mode) { case NL802154_CCA_ENERGY: cca_mode = MCR20A_CCA_MODE1; break; case NL802154_CCA_CARRIER: cca_mode = MCR20A_CCA_MODE2; break; case NL802154_CCA_ENERGY_CARRIER: switch (cca->opt) { case NL802154_CCA_OPT_ENERGY_CARRIER_AND: cca_mode = MCR20A_CCA_MODE3; cca_mode_and = true; break; case NL802154_CCA_OPT_ENERGY_CARRIER_OR: cca_mode = MCR20A_CCA_MODE3; cca_mode_and = false; break; default: return -EINVAL; } break; default: return -EINVAL; } ret = regmap_update_bits(lp->regmap_dar, DAR_PHY_CTRL4, DAR_PHY_CTRL4_CCATYPE_MASK, cca_mode << DAR_PHY_CTRL4_CCATYPE_SHIFT); if (ret < 0) return ret; if (cca_mode == MCR20A_CCA_MODE3) { if (cca_mode_and) { ret = regmap_update_bits(lp->regmap_iar, IAR_CCA_CTRL, IAR_CCA_CTRL_CCA3_AND_NOT_OR, 0x08); } else { ret = regmap_update_bits(lp->regmap_iar, IAR_CCA_CTRL, IAR_CCA_CTRL_CCA3_AND_NOT_OR, 0x00); } if (ret < 0) return ret; } return ret; } static int mcr20a_set_cca_ed_level(struct ieee802154_hw *hw, s32 mbm) { struct mcr20a_local *lp = hw->priv; u32 i; dev_dbg(printdev(lp), "%s\n", __func__); for (i = 0; i < hw->phy->supported.cca_ed_levels_size; i++) { if (hw->phy->supported.cca_ed_levels[i] == mbm) return regmap_write(lp->regmap_iar, IAR_CCA1_THRESH, i); } return 0; } static int mcr20a_set_promiscuous_mode(struct ieee802154_hw *hw, const bool on) { struct mcr20a_local *lp = hw->priv; int ret; u8 rx_frame_filter_reg = 0x0; dev_dbg(printdev(lp), "%s(%d)\n", __func__, on); if (on) { /* All frame types accepted*/ rx_frame_filter_reg &= ~(IAR_RX_FRAME_FLT_FRM_VER); rx_frame_filter_reg |= (IAR_RX_FRAME_FLT_ACK_FT | IAR_RX_FRAME_FLT_NS_FT); ret = regmap_update_bits(lp->regmap_dar, DAR_PHY_CTRL4, DAR_PHY_CTRL4_PROMISCUOUS, DAR_PHY_CTRL4_PROMISCUOUS); if (ret < 0) return ret; ret = regmap_write(lp->regmap_iar, IAR_RX_FRAME_FILTER, rx_frame_filter_reg); if (ret < 0) return ret; } else { ret = regmap_update_bits(lp->regmap_dar, DAR_PHY_CTRL4, DAR_PHY_CTRL4_PROMISCUOUS, 0x0); if (ret < 0) return ret; ret = regmap_write(lp->regmap_iar, IAR_RX_FRAME_FILTER, IAR_RX_FRAME_FLT_FRM_VER | IAR_RX_FRAME_FLT_BEACON_FT | IAR_RX_FRAME_FLT_DATA_FT | IAR_RX_FRAME_FLT_CMD_FT); if (ret < 0) return ret; } return 0; } static const struct ieee802154_ops mcr20a_hw_ops = { .owner = THIS_MODULE, .xmit_async = mcr20a_xmit, .ed = mcr20a_ed, .set_channel = mcr20a_set_channel, .start = mcr20a_start, .stop = mcr20a_stop, .set_hw_addr_filt = mcr20a_set_hw_addr_filt, .set_txpower = mcr20a_set_txpower, .set_cca_mode = mcr20a_set_cca_mode, .set_cca_ed_level = mcr20a_set_cca_ed_level, .set_promiscuous_mode = mcr20a_set_promiscuous_mode, }; static int mcr20a_request_rx(struct mcr20a_local *lp) { dev_dbg(printdev(lp), "%s\n", __func__); /* Start the RX sequence */ regmap_update_bits_async(lp->regmap_dar, DAR_PHY_CTRL1, DAR_PHY_CTRL1_XCVSEQ_MASK, MCR20A_XCVSEQ_RX); return 0; } static void mcr20a_handle_rx_read_buf_complete(void *context) { struct mcr20a_local *lp = context; u8 len = lp->reg_data[0] & DAR_RX_FRAME_LENGTH_MASK; struct sk_buff *skb; dev_dbg(printdev(lp), "%s\n", __func__); dev_dbg(printdev(lp), "RX is done\n"); 
if (!ieee802154_is_valid_psdu_len(len)) { dev_vdbg(&lp->spi->dev, "corrupted frame received\n"); len = IEEE802154_MTU; } len = len - 2; /* get rid of frame check field */ skb = dev_alloc_skb(len); if (!skb) return; __skb_put_data(skb, lp->rx_buf, len); ieee802154_rx_irqsafe(lp->hw, skb, lp->rx_lqi[0]); print_hex_dump_debug("mcr20a rx: ", DUMP_PREFIX_OFFSET, 16, 1, lp->rx_buf, len, 0); pr_debug("mcr20a rx: lqi: %02hhx\n", lp->rx_lqi[0]); /* start RX sequence */ mcr20a_request_rx(lp); } static void mcr20a_handle_rx_read_len_complete(void *context) { struct mcr20a_local *lp = context; u8 len; int ret; dev_dbg(printdev(lp), "%s\n", __func__); /* get the length of received frame */ len = lp->reg_data[0] & DAR_RX_FRAME_LENGTH_MASK; dev_dbg(printdev(lp), "frame len : %d\n", len); /* prepare to read the rx buf */ lp->rx_buf_msg.complete = mcr20a_handle_rx_read_buf_complete; lp->rx_header[0] = MCR20A_BURST_READ_PACKET_BUF; lp->rx_xfer_buf.len = len; ret = spi_async(lp->spi, &lp->rx_buf_msg); if (ret) dev_err(printdev(lp), "failed to read rx buffer length\n"); } static int mcr20a_handle_rx(struct mcr20a_local *lp) { dev_dbg(printdev(lp), "%s\n", __func__); lp->reg_msg.complete = mcr20a_handle_rx_read_len_complete; lp->reg_cmd[0] = MCR20A_READ_REG(DAR_RX_FRM_LEN); lp->reg_xfer_data.len = 1; return spi_async(lp->spi, &lp->reg_msg); } static int mcr20a_handle_tx_complete(struct mcr20a_local *lp) { dev_dbg(printdev(lp), "%s\n", __func__); ieee802154_xmit_complete(lp->hw, lp->tx_skb, false); return mcr20a_request_rx(lp); } static int mcr20a_handle_tx(struct mcr20a_local *lp) { int ret; dev_dbg(printdev(lp), "%s\n", __func__); /* write tx buffer */ lp->tx_header[0] = MCR20A_BURST_WRITE_PACKET_BUF; /* add 2 bytes of FCS */ lp->tx_len[0] = lp->tx_skb->len + 2; lp->tx_xfer_buf.tx_buf = lp->tx_skb->data; /* add 1 byte psduLength */ lp->tx_xfer_buf.len = lp->tx_skb->len + 1; ret = spi_async(lp->spi, &lp->tx_buf_msg); if (ret) { dev_err(printdev(lp), "SPI write Failed for TX buf\n"); return ret; } return 0; } static void mcr20a_irq_clean_complete(void *context) { struct mcr20a_local *lp = context; u8 seq_state = lp->irq_data[DAR_IRQ_STS1] & DAR_PHY_CTRL1_XCVSEQ_MASK; dev_dbg(printdev(lp), "%s\n", __func__); enable_irq(lp->spi->irq); dev_dbg(printdev(lp), "IRQ STA1 (%02x) STA2 (%02x)\n", lp->irq_data[DAR_IRQ_STS1], lp->irq_data[DAR_IRQ_STS2]); switch (seq_state) { /* TX IRQ, RX IRQ and SEQ IRQ */ case (DAR_IRQSTS1_TXIRQ | DAR_IRQSTS1_SEQIRQ): if (lp->is_tx) { lp->is_tx = 0; dev_dbg(printdev(lp), "TX is done. No ACK\n"); mcr20a_handle_tx_complete(lp); } break; case (DAR_IRQSTS1_RXIRQ | DAR_IRQSTS1_SEQIRQ): /* rx is starting */ dev_dbg(printdev(lp), "RX is starting\n"); mcr20a_handle_rx(lp); break; case (DAR_IRQSTS1_RXIRQ | DAR_IRQSTS1_TXIRQ | DAR_IRQSTS1_SEQIRQ): if (lp->is_tx) { /* tx is done */ lp->is_tx = 0; dev_dbg(printdev(lp), "TX is done. 
Get ACK\n"); mcr20a_handle_tx_complete(lp); } else { /* rx is starting */ dev_dbg(printdev(lp), "RX is starting\n"); mcr20a_handle_rx(lp); } break; case (DAR_IRQSTS1_SEQIRQ): if (lp->is_tx) { dev_dbg(printdev(lp), "TX is starting\n"); mcr20a_handle_tx(lp); } else { dev_dbg(printdev(lp), "MCR20A is stop\n"); } break; } } static void mcr20a_irq_status_complete(void *context) { int ret; struct mcr20a_local *lp = context; dev_dbg(printdev(lp), "%s\n", __func__); regmap_update_bits_async(lp->regmap_dar, DAR_PHY_CTRL1, DAR_PHY_CTRL1_XCVSEQ_MASK, MCR20A_XCVSEQ_IDLE); lp->reg_msg.complete = mcr20a_irq_clean_complete; lp->reg_cmd[0] = MCR20A_WRITE_REG(DAR_IRQ_STS1); memcpy(lp->reg_data, lp->irq_data, MCR20A_IRQSTS_NUM); lp->reg_xfer_data.len = MCR20A_IRQSTS_NUM; ret = spi_async(lp->spi, &lp->reg_msg); if (ret) dev_err(printdev(lp), "failed to clean irq status\n"); } static irqreturn_t mcr20a_irq_isr(int irq, void *data) { struct mcr20a_local *lp = data; int ret; disable_irq_nosync(irq); lp->irq_header[0] = MCR20A_READ_REG(DAR_IRQ_STS1); /* read IRQSTSx */ ret = spi_async(lp->spi, &lp->irq_msg); if (ret) { enable_irq(irq); return IRQ_NONE; } return IRQ_HANDLED; } static void mcr20a_hw_setup(struct mcr20a_local *lp) { u8 i; struct ieee802154_hw *hw = lp->hw; struct wpan_phy *phy = lp->hw->phy; dev_dbg(printdev(lp), "%s\n", __func__); hw->flags = IEEE802154_HW_TX_OMIT_CKSUM | IEEE802154_HW_AFILT | IEEE802154_HW_PROMISCUOUS; phy->flags = WPAN_PHY_FLAG_TXPOWER | WPAN_PHY_FLAG_CCA_ED_LEVEL | WPAN_PHY_FLAG_CCA_MODE; phy->supported.cca_modes = BIT(NL802154_CCA_ENERGY) | BIT(NL802154_CCA_CARRIER) | BIT(NL802154_CCA_ENERGY_CARRIER); phy->supported.cca_opts = BIT(NL802154_CCA_OPT_ENERGY_CARRIER_AND) | BIT(NL802154_CCA_OPT_ENERGY_CARRIER_OR); /* initiating cca_ed_levels */ for (i = MCR20A_MAX_CCA_THRESHOLD; i < MCR20A_MIN_CCA_THRESHOLD + 1; ++i) { mcr20a_ed_levels[i] = -i * 100; } phy->supported.cca_ed_levels = mcr20a_ed_levels; phy->supported.cca_ed_levels_size = ARRAY_SIZE(mcr20a_ed_levels); phy->cca.mode = NL802154_CCA_ENERGY; phy->supported.channels[0] = MCR20A_VALID_CHANNELS; phy->current_page = 0; /* MCR20A default reset value */ phy->current_channel = 20; phy->supported.tx_powers = mcr20a_powers; phy->supported.tx_powers_size = ARRAY_SIZE(mcr20a_powers); phy->cca_ed_level = phy->supported.cca_ed_levels[75]; phy->transmit_power = phy->supported.tx_powers[0x0F]; } static void mcr20a_setup_tx_spi_messages(struct mcr20a_local *lp) { spi_message_init(&lp->tx_buf_msg); lp->tx_buf_msg.context = lp; lp->tx_buf_msg.complete = mcr20a_write_tx_buf_complete; lp->tx_xfer_header.len = 1; lp->tx_xfer_header.tx_buf = lp->tx_header; lp->tx_xfer_len.len = 1; lp->tx_xfer_len.tx_buf = lp->tx_len; spi_message_add_tail(&lp->tx_xfer_header, &lp->tx_buf_msg); spi_message_add_tail(&lp->tx_xfer_len, &lp->tx_buf_msg); spi_message_add_tail(&lp->tx_xfer_buf, &lp->tx_buf_msg); } static void mcr20a_setup_rx_spi_messages(struct mcr20a_local *lp) { spi_message_init(&lp->reg_msg); lp->reg_msg.context = lp; lp->reg_xfer_cmd.len = 1; lp->reg_xfer_cmd.tx_buf = lp->reg_cmd; lp->reg_xfer_cmd.rx_buf = lp->reg_cmd; lp->reg_xfer_data.rx_buf = lp->reg_data; lp->reg_xfer_data.tx_buf = lp->reg_data; spi_message_add_tail(&lp->reg_xfer_cmd, &lp->reg_msg); spi_message_add_tail(&lp->reg_xfer_data, &lp->reg_msg); spi_message_init(&lp->rx_buf_msg); lp->rx_buf_msg.context = lp; lp->rx_buf_msg.complete = mcr20a_handle_rx_read_buf_complete; lp->rx_xfer_header.len = 1; lp->rx_xfer_header.tx_buf = lp->rx_header; lp->rx_xfer_header.rx_buf = lp->rx_header; 
lp->rx_xfer_buf.rx_buf = lp->rx_buf; lp->rx_xfer_lqi.len = 1; lp->rx_xfer_lqi.rx_buf = lp->rx_lqi; spi_message_add_tail(&lp->rx_xfer_header, &lp->rx_buf_msg); spi_message_add_tail(&lp->rx_xfer_buf, &lp->rx_buf_msg); spi_message_add_tail(&lp->rx_xfer_lqi, &lp->rx_buf_msg); } static void mcr20a_setup_irq_spi_messages(struct mcr20a_local *lp) { spi_message_init(&lp->irq_msg); lp->irq_msg.context = lp; lp->irq_msg.complete = mcr20a_irq_status_complete; lp->irq_xfer_header.len = 1; lp->irq_xfer_header.tx_buf = lp->irq_header; lp->irq_xfer_header.rx_buf = lp->irq_header; lp->irq_xfer_data.len = MCR20A_IRQSTS_NUM; lp->irq_xfer_data.rx_buf = lp->irq_data; spi_message_add_tail(&lp->irq_xfer_header, &lp->irq_msg); spi_message_add_tail(&lp->irq_xfer_data, &lp->irq_msg); } static int mcr20a_phy_init(struct mcr20a_local *lp) { u8 index; unsigned int phy_reg = 0; int ret; dev_dbg(printdev(lp), "%s\n", __func__); /* Disable Tristate on COCO MISO for SPI reads */ ret = regmap_write(lp->regmap_iar, IAR_MISC_PAD_CTRL, 0x02); if (ret) goto err_ret; /* Clear all PP IRQ bits in IRQSTS1 to avoid unexpected interrupts * immediately after init */ ret = regmap_write(lp->regmap_dar, DAR_IRQ_STS1, 0xEF); if (ret) goto err_ret; /* Clear all PP IRQ bits in IRQSTS2 */ ret = regmap_write(lp->regmap_dar, DAR_IRQ_STS2, DAR_IRQSTS2_ASM_IRQ | DAR_IRQSTS2_PB_ERR_IRQ | DAR_IRQSTS2_WAKE_IRQ); if (ret) goto err_ret; /* Disable all timer interrupts */ ret = regmap_write(lp->regmap_dar, DAR_IRQ_STS3, 0xFF); if (ret) goto err_ret; /* PHY_CTRL1 : default HW settings + AUTOACK enabled */ ret = regmap_update_bits(lp->regmap_dar, DAR_PHY_CTRL1, DAR_PHY_CTRL1_AUTOACK, DAR_PHY_CTRL1_AUTOACK); /* PHY_CTRL2 : disable all interrupts */ ret = regmap_write(lp->regmap_dar, DAR_PHY_CTRL2, 0xFF); if (ret) goto err_ret; /* PHY_CTRL3 : disable all timers and remaining interrupts */ ret = regmap_write(lp->regmap_dar, DAR_PHY_CTRL3, DAR_PHY_CTRL3_ASM_MSK | DAR_PHY_CTRL3_PB_ERR_MSK | DAR_PHY_CTRL3_WAKE_MSK); if (ret) goto err_ret; /* SRC_CTRL : enable Acknowledge Frame Pending and * Source Address Matching Enable */ ret = regmap_write(lp->regmap_dar, DAR_SRC_CTRL, DAR_SRC_CTRL_ACK_FRM_PND | (DAR_SRC_CTRL_INDEX << DAR_SRC_CTRL_INDEX_SHIFT)); if (ret) goto err_ret; /* RX_FRAME_FILTER */ /* FRM_VER[1:0] = b11. 
Accept FrameVersion 0 and 1 packets */ ret = regmap_write(lp->regmap_iar, IAR_RX_FRAME_FILTER, IAR_RX_FRAME_FLT_FRM_VER | IAR_RX_FRAME_FLT_BEACON_FT | IAR_RX_FRAME_FLT_DATA_FT | IAR_RX_FRAME_FLT_CMD_FT); if (ret) goto err_ret; dev_info(printdev(lp), "MCR20A DAR overwrites version: 0x%02x\n", MCR20A_OVERWRITE_VERSION); /* Overwrites direct registers */ ret = regmap_write(lp->regmap_dar, DAR_OVERWRITE_VER, MCR20A_OVERWRITE_VERSION); if (ret) goto err_ret; /* Overwrites indirect registers */ ret = regmap_multi_reg_write(lp->regmap_iar, mar20a_iar_overwrites, ARRAY_SIZE(mar20a_iar_overwrites)); if (ret) goto err_ret; /* Clear HW indirect queue */ dev_dbg(printdev(lp), "clear HW indirect queue\n"); for (index = 0; index < MCR20A_PHY_INDIRECT_QUEUE_SIZE; index++) { phy_reg = (u8)(((index & DAR_SRC_CTRL_INDEX) << DAR_SRC_CTRL_INDEX_SHIFT) | (DAR_SRC_CTRL_SRCADDR_EN) | (DAR_SRC_CTRL_INDEX_DISABLE)); ret = regmap_write(lp->regmap_dar, DAR_SRC_CTRL, phy_reg); if (ret) goto err_ret; phy_reg = 0; } /* Assign HW Indirect hash table to PAN0 */ ret = regmap_read(lp->regmap_iar, IAR_DUAL_PAN_CTRL, &phy_reg); if (ret) goto err_ret; /* Clear current lvl */ phy_reg &= ~IAR_DUAL_PAN_CTRL_DUAL_PAN_SAM_LVL_MSK; /* Set new lvl */ phy_reg |= MCR20A_PHY_INDIRECT_QUEUE_SIZE << IAR_DUAL_PAN_CTRL_DUAL_PAN_SAM_LVL_SHIFT; ret = regmap_write(lp->regmap_iar, IAR_DUAL_PAN_CTRL, phy_reg); if (ret) goto err_ret; /* Set CCA threshold to -75 dBm */ ret = regmap_write(lp->regmap_iar, IAR_CCA1_THRESH, 0x4B); if (ret) goto err_ret; /* Set prescaller to obtain 1 symbol (16us) timebase */ ret = regmap_write(lp->regmap_iar, IAR_TMR_PRESCALE, 0x05); if (ret) goto err_ret; /* Enable autodoze mode. */ ret = regmap_update_bits(lp->regmap_dar, DAR_PWR_MODES, DAR_PWR_MODES_AUTODOZE, DAR_PWR_MODES_AUTODOZE); if (ret) goto err_ret; /* Disable clk_out */ ret = regmap_update_bits(lp->regmap_dar, DAR_CLK_OUT_CTRL, DAR_CLK_OUT_CTRL_EN, 0x0); if (ret) goto err_ret; return 0; err_ret: return ret; } static int mcr20a_probe(struct spi_device *spi) { struct ieee802154_hw *hw; struct mcr20a_local *lp; struct gpio_desc *rst_b; int irq_type; int ret = -ENOMEM; dev_dbg(&spi->dev, "%s\n", __func__); if (!spi->irq) { dev_err(&spi->dev, "no IRQ specified\n"); return -EINVAL; } rst_b = devm_gpiod_get(&spi->dev, "rst_b", GPIOD_OUT_HIGH); if (IS_ERR(rst_b)) return dev_err_probe(&spi->dev, PTR_ERR(rst_b), "Failed to get 'rst_b' gpio"); /* reset mcr20a */ usleep_range(10, 20); gpiod_set_value_cansleep(rst_b, 1); usleep_range(10, 20); gpiod_set_value_cansleep(rst_b, 0); usleep_range(120, 240); /* allocate ieee802154_hw and private data */ hw = ieee802154_alloc_hw(sizeof(*lp), &mcr20a_hw_ops); if (!hw) { dev_crit(&spi->dev, "ieee802154_alloc_hw failed\n"); return ret; } /* init mcr20a local data */ lp = hw->priv; lp->hw = hw; lp->spi = spi; /* init ieee802154_hw */ hw->parent = &spi->dev; ieee802154_random_extended_addr(&hw->phy->perm_extended_addr); /* init buf */ lp->buf = devm_kzalloc(&spi->dev, SPI_COMMAND_BUFFER, GFP_KERNEL); if (!lp->buf) { ret = -ENOMEM; goto free_dev; } mcr20a_setup_tx_spi_messages(lp); mcr20a_setup_rx_spi_messages(lp); mcr20a_setup_irq_spi_messages(lp); /* setup regmap */ lp->regmap_dar = devm_regmap_init_spi(spi, &mcr20a_dar_regmap); if (IS_ERR(lp->regmap_dar)) { ret = PTR_ERR(lp->regmap_dar); dev_err(&spi->dev, "Failed to allocate dar map: %d\n", ret); goto free_dev; } lp->regmap_iar = devm_regmap_init_spi(spi, &mcr20a_iar_regmap); if (IS_ERR(lp->regmap_iar)) { ret = PTR_ERR(lp->regmap_iar); dev_err(&spi->dev, "Failed to allocate iar 
map: %d\n", ret); goto free_dev; } mcr20a_hw_setup(lp); spi_set_drvdata(spi, lp); ret = mcr20a_phy_init(lp); if (ret < 0) { dev_crit(&spi->dev, "mcr20a_phy_init failed\n"); goto free_dev; } irq_type = irq_get_trigger_type(spi->irq); if (!irq_type) irq_type = IRQF_TRIGGER_FALLING; ret = devm_request_irq(&spi->dev, spi->irq, mcr20a_irq_isr, irq_type, dev_name(&spi->dev), lp); if (ret) { dev_err(&spi->dev, "could not request_irq for mcr20a\n"); ret = -ENODEV; goto free_dev; } /* disable_irq by default and wait for starting hardware */ disable_irq(spi->irq); ret = ieee802154_register_hw(hw); if (ret) { dev_crit(&spi->dev, "ieee802154_register_hw failed\n"); goto free_dev; } return ret; free_dev: ieee802154_free_hw(lp->hw); return ret; } static void mcr20a_remove(struct spi_device *spi) { struct mcr20a_local *lp = spi_get_drvdata(spi); dev_dbg(&spi->dev, "%s\n", __func__); ieee802154_unregister_hw(lp->hw); ieee802154_free_hw(lp->hw); } static const struct of_device_id mcr20a_of_match[] = { { .compatible = "nxp,mcr20a", }, { }, }; MODULE_DEVICE_TABLE(of, mcr20a_of_match); static const struct spi_device_id mcr20a_device_id[] = { { .name = "mcr20a", }, { }, }; MODULE_DEVICE_TABLE(spi, mcr20a_device_id); static struct spi_driver mcr20a_driver = { .id_table = mcr20a_device_id, .driver = { .of_match_table = mcr20a_of_match, .name = "mcr20a", }, .probe = mcr20a_probe, .remove = mcr20a_remove, }; module_spi_driver(mcr20a_driver); MODULE_DESCRIPTION("MCR20A Transceiver Driver"); MODULE_LICENSE("GPL v2"); MODULE_AUTHOR("Xue Liu <liuxuenetmail@gmail>");
linux-master
drivers/net/ieee802154/mcr20a.c
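The mcr20a receive path above clamps an implausible frame length to the 127-octet MTU and strips the 2-byte frame check sequence before handing the payload to the stack. The short standalone C sketch below illustrates just that length handling; the helper names (psdu_len_is_valid, rx_payload_len) and the simplified validity check are illustrative stand-ins, not the kernel's ieee802154_is_valid_psdu_len().

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define IEEE802154_MTU     127 /* maximum PSDU length in octets */
#define IEEE802154_FCS_LEN   2 /* frame check sequence appended by the PHY */

/* Simplified stand-in for ieee802154_is_valid_psdu_len(): accept anything
 * from an ACK-sized frame up to the MTU. The kernel helper is stricter. */
static bool psdu_len_is_valid(uint8_t len)
{
	return len >= 5 && len <= IEEE802154_MTU;
}

/* Mirror the driver's RX length handling: clamp a corrupted length to the
 * MTU, then drop the trailing FCS so only the MAC frame is delivered. */
static size_t rx_payload_len(uint8_t reported_len)
{
	uint8_t len = reported_len;

	if (!psdu_len_is_valid(len)) {
		fprintf(stderr, "corrupted frame received, clamping to MTU\n");
		len = IEEE802154_MTU;
	}

	return (size_t)len - IEEE802154_FCS_LEN;
}

int main(void)
{
	/* A 12-octet PSDU yields 10 octets once the FCS is stripped. */
	printf("payload for len=12:  %zu\n", rx_payload_len(12));
	/* An impossible length is clamped to 127, i.e. 125 payload octets. */
	printf("payload for len=200: %zu\n", rx_payload_len(200));
	return 0;
}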
// SPDX-License-Identifier: GPL-2.0-or-later /* * Driver for Microchip MRF24J40 802.15.4 Wireless-PAN Networking controller * * Copyright (C) 2012 Alan Ott <[email protected]> * Signal 11 Software */ #include <linux/spi/spi.h> #include <linux/interrupt.h> #include <linux/mod_devicetable.h> #include <linux/module.h> #include <linux/regmap.h> #include <linux/ieee802154.h> #include <linux/irq.h> #include <net/cfg802154.h> #include <net/mac802154.h> /* MRF24J40 Short Address Registers */ #define REG_RXMCR 0x00 /* Receive MAC control */ #define BIT_PROMI BIT(0) #define BIT_ERRPKT BIT(1) #define BIT_NOACKRSP BIT(5) #define BIT_PANCOORD BIT(3) #define REG_PANIDL 0x01 /* PAN ID (low) */ #define REG_PANIDH 0x02 /* PAN ID (high) */ #define REG_SADRL 0x03 /* Short address (low) */ #define REG_SADRH 0x04 /* Short address (high) */ #define REG_EADR0 0x05 /* Long address (low) (high is EADR7) */ #define REG_EADR1 0x06 #define REG_EADR2 0x07 #define REG_EADR3 0x08 #define REG_EADR4 0x09 #define REG_EADR5 0x0A #define REG_EADR6 0x0B #define REG_EADR7 0x0C #define REG_RXFLUSH 0x0D #define REG_ORDER 0x10 #define REG_TXMCR 0x11 /* Transmit MAC control */ #define TXMCR_MIN_BE_SHIFT 3 #define TXMCR_MIN_BE_MASK 0x18 #define TXMCR_CSMA_RETRIES_SHIFT 0 #define TXMCR_CSMA_RETRIES_MASK 0x07 #define REG_ACKTMOUT 0x12 #define REG_ESLOTG1 0x13 #define REG_SYMTICKL 0x14 #define REG_SYMTICKH 0x15 #define REG_PACON0 0x16 /* Power Amplifier Control */ #define REG_PACON1 0x17 /* Power Amplifier Control */ #define REG_PACON2 0x18 /* Power Amplifier Control */ #define REG_TXBCON0 0x1A #define REG_TXNCON 0x1B /* Transmit Normal FIFO Control */ #define BIT_TXNTRIG BIT(0) #define BIT_TXNSECEN BIT(1) #define BIT_TXNACKREQ BIT(2) #define REG_TXG1CON 0x1C #define REG_TXG2CON 0x1D #define REG_ESLOTG23 0x1E #define REG_ESLOTG45 0x1F #define REG_ESLOTG67 0x20 #define REG_TXPEND 0x21 #define REG_WAKECON 0x22 #define REG_FROMOFFSET 0x23 #define REG_TXSTAT 0x24 /* TX MAC Status Register */ #define REG_TXBCON1 0x25 #define REG_GATECLK 0x26 #define REG_TXTIME 0x27 #define REG_HSYMTMRL 0x28 #define REG_HSYMTMRH 0x29 #define REG_SOFTRST 0x2A /* Soft Reset */ #define REG_SECCON0 0x2C #define REG_SECCON1 0x2D #define REG_TXSTBL 0x2E /* TX Stabilization */ #define REG_RXSR 0x30 #define REG_INTSTAT 0x31 /* Interrupt Status */ #define BIT_TXNIF BIT(0) #define BIT_RXIF BIT(3) #define BIT_SECIF BIT(4) #define BIT_SECIGNORE BIT(7) #define REG_INTCON 0x32 /* Interrupt Control */ #define BIT_TXNIE BIT(0) #define BIT_RXIE BIT(3) #define BIT_SECIE BIT(4) #define REG_GPIO 0x33 /* GPIO */ #define REG_TRISGPIO 0x34 /* GPIO direction */ #define REG_SLPACK 0x35 #define REG_RFCTL 0x36 /* RF Control Mode Register */ #define BIT_RFRST BIT(2) #define REG_SECCR2 0x37 #define REG_BBREG0 0x38 #define REG_BBREG1 0x39 /* Baseband Registers */ #define BIT_RXDECINV BIT(2) #define REG_BBREG2 0x3A /* */ #define BBREG2_CCA_MODE_SHIFT 6 #define BBREG2_CCA_MODE_MASK 0xc0 #define REG_BBREG3 0x3B #define REG_BBREG4 0x3C #define REG_BBREG6 0x3E /* */ #define REG_CCAEDTH 0x3F /* Energy Detection Threshold */ /* MRF24J40 Long Address Registers */ #define REG_RFCON0 0x200 /* RF Control Registers */ #define RFCON0_CH_SHIFT 4 #define RFCON0_CH_MASK 0xf0 #define RFOPT_RECOMMEND 3 #define REG_RFCON1 0x201 #define REG_RFCON2 0x202 #define REG_RFCON3 0x203 #define TXPWRL_MASK 0xc0 #define TXPWRL_SHIFT 6 #define TXPWRL_30 0x3 #define TXPWRL_20 0x2 #define TXPWRL_10 0x1 #define TXPWRL_0 0x0 #define TXPWRS_MASK 0x38 #define TXPWRS_SHIFT 3 #define TXPWRS_6_3 0x7 #define TXPWRS_4_9 0x6 
#define TXPWRS_3_7 0x5 #define TXPWRS_2_8 0x4 #define TXPWRS_1_9 0x3 #define TXPWRS_1_2 0x2 #define TXPWRS_0_5 0x1 #define TXPWRS_0 0x0 #define REG_RFCON5 0x205 #define REG_RFCON6 0x206 #define REG_RFCON7 0x207 #define REG_RFCON8 0x208 #define REG_SLPCAL0 0x209 #define REG_SLPCAL1 0x20A #define REG_SLPCAL2 0x20B #define REG_RFSTATE 0x20F #define REG_RSSI 0x210 #define REG_SLPCON0 0x211 /* Sleep Clock Control Registers */ #define BIT_INTEDGE BIT(1) #define REG_SLPCON1 0x220 #define REG_WAKETIMEL 0x222 /* Wake-up Time Match Value Low */ #define REG_WAKETIMEH 0x223 /* Wake-up Time Match Value High */ #define REG_REMCNTL 0x224 #define REG_REMCNTH 0x225 #define REG_MAINCNT0 0x226 #define REG_MAINCNT1 0x227 #define REG_MAINCNT2 0x228 #define REG_MAINCNT3 0x229 #define REG_TESTMODE 0x22F /* Test mode */ #define REG_ASSOEAR0 0x230 #define REG_ASSOEAR1 0x231 #define REG_ASSOEAR2 0x232 #define REG_ASSOEAR3 0x233 #define REG_ASSOEAR4 0x234 #define REG_ASSOEAR5 0x235 #define REG_ASSOEAR6 0x236 #define REG_ASSOEAR7 0x237 #define REG_ASSOSAR0 0x238 #define REG_ASSOSAR1 0x239 #define REG_UNONCE0 0x240 #define REG_UNONCE1 0x241 #define REG_UNONCE2 0x242 #define REG_UNONCE3 0x243 #define REG_UNONCE4 0x244 #define REG_UNONCE5 0x245 #define REG_UNONCE6 0x246 #define REG_UNONCE7 0x247 #define REG_UNONCE8 0x248 #define REG_UNONCE9 0x249 #define REG_UNONCE10 0x24A #define REG_UNONCE11 0x24B #define REG_UNONCE12 0x24C #define REG_RX_FIFO 0x300 /* Receive FIFO */ /* Device configuration: Only channels 11-26 on page 0 are supported. */ #define MRF24J40_CHAN_MIN 11 #define MRF24J40_CHAN_MAX 26 #define CHANNEL_MASK (((u32)1 << (MRF24J40_CHAN_MAX + 1)) \ - ((u32)1 << MRF24J40_CHAN_MIN)) #define TX_FIFO_SIZE 128 /* From datasheet */ #define RX_FIFO_SIZE 144 /* From datasheet */ #define SET_CHANNEL_DELAY_US 192 /* From datasheet */ enum mrf24j40_modules { MRF24J40, MRF24J40MA, MRF24J40MC }; /* Device Private Data */ struct mrf24j40 { struct spi_device *spi; struct ieee802154_hw *hw; struct regmap *regmap_short; struct regmap *regmap_long; /* for writing txfifo */ struct spi_message tx_msg; u8 tx_hdr_buf[2]; struct spi_transfer tx_hdr_trx; u8 tx_len_buf[2]; struct spi_transfer tx_len_trx; struct spi_transfer tx_buf_trx; struct sk_buff *tx_skb; /* post transmit message to send frame out */ struct spi_message tx_post_msg; u8 tx_post_buf[2]; struct spi_transfer tx_post_trx; /* for protect/unprotect/read length rxfifo */ struct spi_message rx_msg; u8 rx_buf[3]; struct spi_transfer rx_trx; /* receive handling */ struct spi_message rx_buf_msg; u8 rx_addr_buf[2]; struct spi_transfer rx_addr_trx; u8 rx_lqi_buf[2]; struct spi_transfer rx_lqi_trx; u8 rx_fifo_buf[RX_FIFO_SIZE]; struct spi_transfer rx_fifo_buf_trx; /* isr handling for reading intstat */ struct spi_message irq_msg; u8 irq_buf[2]; struct spi_transfer irq_trx; }; /* regmap information for short address register access */ #define MRF24J40_SHORT_WRITE 0x01 #define MRF24J40_SHORT_READ 0x00 #define MRF24J40_SHORT_NUMREGS 0x3F /* regmap information for long address register access */ #define MRF24J40_LONG_ACCESS 0x80 #define MRF24J40_LONG_NUMREGS 0x38F /* Read/Write SPI Commands for Short and Long Address registers. 
*/ #define MRF24J40_READSHORT(reg) ((reg) << 1) #define MRF24J40_WRITESHORT(reg) ((reg) << 1 | 1) #define MRF24J40_READLONG(reg) (1 << 15 | (reg) << 5) #define MRF24J40_WRITELONG(reg) (1 << 15 | (reg) << 5 | 1 << 4) /* The datasheet indicates the theoretical maximum for SCK to be 10MHz */ #define MAX_SPI_SPEED_HZ 10000000 #define printdev(X) (&X->spi->dev) static bool mrf24j40_short_reg_writeable(struct device *dev, unsigned int reg) { switch (reg) { case REG_RXMCR: case REG_PANIDL: case REG_PANIDH: case REG_SADRL: case REG_SADRH: case REG_EADR0: case REG_EADR1: case REG_EADR2: case REG_EADR3: case REG_EADR4: case REG_EADR5: case REG_EADR6: case REG_EADR7: case REG_RXFLUSH: case REG_ORDER: case REG_TXMCR: case REG_ACKTMOUT: case REG_ESLOTG1: case REG_SYMTICKL: case REG_SYMTICKH: case REG_PACON0: case REG_PACON1: case REG_PACON2: case REG_TXBCON0: case REG_TXNCON: case REG_TXG1CON: case REG_TXG2CON: case REG_ESLOTG23: case REG_ESLOTG45: case REG_ESLOTG67: case REG_TXPEND: case REG_WAKECON: case REG_FROMOFFSET: case REG_TXBCON1: case REG_GATECLK: case REG_TXTIME: case REG_HSYMTMRL: case REG_HSYMTMRH: case REG_SOFTRST: case REG_SECCON0: case REG_SECCON1: case REG_TXSTBL: case REG_RXSR: case REG_INTCON: case REG_TRISGPIO: case REG_GPIO: case REG_RFCTL: case REG_SECCR2: case REG_SLPACK: case REG_BBREG0: case REG_BBREG1: case REG_BBREG2: case REG_BBREG3: case REG_BBREG4: case REG_BBREG6: case REG_CCAEDTH: return true; default: return false; } } static bool mrf24j40_short_reg_readable(struct device *dev, unsigned int reg) { bool rc; /* all writeable are also readable */ rc = mrf24j40_short_reg_writeable(dev, reg); if (rc) return rc; /* readonly regs */ switch (reg) { case REG_TXSTAT: case REG_INTSTAT: return true; default: return false; } } static bool mrf24j40_short_reg_volatile(struct device *dev, unsigned int reg) { /* can be changed during runtime */ switch (reg) { case REG_TXSTAT: case REG_INTSTAT: case REG_RXFLUSH: case REG_TXNCON: case REG_SOFTRST: case REG_RFCTL: case REG_TXBCON0: case REG_TXG1CON: case REG_TXG2CON: case REG_TXBCON1: case REG_SECCON0: case REG_RXSR: case REG_SLPACK: case REG_SECCR2: case REG_BBREG6: /* use them in spi_async and regmap so it's volatile */ case REG_BBREG1: return true; default: return false; } } static bool mrf24j40_short_reg_precious(struct device *dev, unsigned int reg) { /* don't clear irq line on read */ switch (reg) { case REG_INTSTAT: return true; default: return false; } } static const struct regmap_config mrf24j40_short_regmap = { .name = "mrf24j40_short", .reg_bits = 7, .val_bits = 8, .pad_bits = 1, .write_flag_mask = MRF24J40_SHORT_WRITE, .read_flag_mask = MRF24J40_SHORT_READ, .cache_type = REGCACHE_RBTREE, .max_register = MRF24J40_SHORT_NUMREGS, .writeable_reg = mrf24j40_short_reg_writeable, .readable_reg = mrf24j40_short_reg_readable, .volatile_reg = mrf24j40_short_reg_volatile, .precious_reg = mrf24j40_short_reg_precious, }; static bool mrf24j40_long_reg_writeable(struct device *dev, unsigned int reg) { switch (reg) { case REG_RFCON0: case REG_RFCON1: case REG_RFCON2: case REG_RFCON3: case REG_RFCON5: case REG_RFCON6: case REG_RFCON7: case REG_RFCON8: case REG_SLPCAL2: case REG_SLPCON0: case REG_SLPCON1: case REG_WAKETIMEL: case REG_WAKETIMEH: case REG_REMCNTL: case REG_REMCNTH: case REG_MAINCNT0: case REG_MAINCNT1: case REG_MAINCNT2: case REG_MAINCNT3: case REG_TESTMODE: case REG_ASSOEAR0: case REG_ASSOEAR1: case REG_ASSOEAR2: case REG_ASSOEAR3: case REG_ASSOEAR4: case REG_ASSOEAR5: case REG_ASSOEAR6: case REG_ASSOEAR7: case REG_ASSOSAR0: case 
REG_ASSOSAR1: case REG_UNONCE0: case REG_UNONCE1: case REG_UNONCE2: case REG_UNONCE3: case REG_UNONCE4: case REG_UNONCE5: case REG_UNONCE6: case REG_UNONCE7: case REG_UNONCE8: case REG_UNONCE9: case REG_UNONCE10: case REG_UNONCE11: case REG_UNONCE12: return true; default: return false; } } static bool mrf24j40_long_reg_readable(struct device *dev, unsigned int reg) { bool rc; /* all writeable are also readable */ rc = mrf24j40_long_reg_writeable(dev, reg); if (rc) return rc; /* readonly regs */ switch (reg) { case REG_SLPCAL0: case REG_SLPCAL1: case REG_RFSTATE: case REG_RSSI: return true; default: return false; } } static bool mrf24j40_long_reg_volatile(struct device *dev, unsigned int reg) { /* can be changed during runtime */ switch (reg) { case REG_SLPCAL0: case REG_SLPCAL1: case REG_SLPCAL2: case REG_RFSTATE: case REG_RSSI: case REG_MAINCNT3: return true; default: return false; } } static const struct regmap_config mrf24j40_long_regmap = { .name = "mrf24j40_long", .reg_bits = 11, .val_bits = 8, .pad_bits = 5, .write_flag_mask = MRF24J40_LONG_ACCESS, .read_flag_mask = MRF24J40_LONG_ACCESS, .cache_type = REGCACHE_RBTREE, .max_register = MRF24J40_LONG_NUMREGS, .writeable_reg = mrf24j40_long_reg_writeable, .readable_reg = mrf24j40_long_reg_readable, .volatile_reg = mrf24j40_long_reg_volatile, }; static int mrf24j40_long_regmap_write(void *context, const void *data, size_t count) { struct spi_device *spi = context; u8 buf[3]; if (count > 3) return -EINVAL; /* regmap supports read/write mask only in frist byte * long write access need to set the 12th bit, so we * make special handling for write. */ memcpy(buf, data, count); buf[1] |= (1 << 4); return spi_write(spi, buf, count); } static int mrf24j40_long_regmap_read(void *context, const void *reg, size_t reg_size, void *val, size_t val_size) { struct spi_device *spi = context; return spi_write_then_read(spi, reg, reg_size, val, val_size); } static const struct regmap_bus mrf24j40_long_regmap_bus = { .write = mrf24j40_long_regmap_write, .read = mrf24j40_long_regmap_read, .reg_format_endian_default = REGMAP_ENDIAN_BIG, .val_format_endian_default = REGMAP_ENDIAN_BIG, }; static void write_tx_buf_complete(void *context) { struct mrf24j40 *devrec = context; __le16 fc = ieee802154_get_fc_from_skb(devrec->tx_skb); u8 val = BIT_TXNTRIG; int ret; if (ieee802154_is_secen(fc)) val |= BIT_TXNSECEN; if (ieee802154_is_ackreq(fc)) val |= BIT_TXNACKREQ; devrec->tx_post_msg.complete = NULL; devrec->tx_post_buf[0] = MRF24J40_WRITESHORT(REG_TXNCON); devrec->tx_post_buf[1] = val; ret = spi_async(devrec->spi, &devrec->tx_post_msg); if (ret) dev_err(printdev(devrec), "SPI write Failed for transmit buf\n"); } /* This function relies on an undocumented write method. Once a write command and address is set, as many bytes of data as desired can be clocked into the device. The datasheet only shows setting one byte at a time. */ static int write_tx_buf(struct mrf24j40 *devrec, u16 reg, const u8 *data, size_t length) { u16 cmd; int ret; /* Range check the length. 2 bytes are used for the length fields.*/ if (length > TX_FIFO_SIZE-2) { dev_err(printdev(devrec), "write_tx_buf() was passed too large a buffer. Performing short write.\n"); length = TX_FIFO_SIZE-2; } cmd = MRF24J40_WRITELONG(reg); devrec->tx_hdr_buf[0] = cmd >> 8 & 0xff; devrec->tx_hdr_buf[1] = cmd & 0xff; devrec->tx_len_buf[0] = 0x0; /* Header Length. Set to 0 for now. 
TODO */ devrec->tx_len_buf[1] = length; /* Total length */ devrec->tx_buf_trx.tx_buf = data; devrec->tx_buf_trx.len = length; ret = spi_async(devrec->spi, &devrec->tx_msg); if (ret) dev_err(printdev(devrec), "SPI write Failed for TX buf\n"); return ret; } static int mrf24j40_tx(struct ieee802154_hw *hw, struct sk_buff *skb) { struct mrf24j40 *devrec = hw->priv; dev_dbg(printdev(devrec), "tx packet of %d bytes\n", skb->len); devrec->tx_skb = skb; return write_tx_buf(devrec, 0x000, skb->data, skb->len); } static int mrf24j40_ed(struct ieee802154_hw *hw, u8 *level) { /* TODO: */ pr_warn("mrf24j40: ed not implemented\n"); *level = 0; return 0; } static int mrf24j40_start(struct ieee802154_hw *hw) { struct mrf24j40 *devrec = hw->priv; dev_dbg(printdev(devrec), "start\n"); /* Clear TXNIE and RXIE. Enable interrupts */ return regmap_update_bits(devrec->regmap_short, REG_INTCON, BIT_TXNIE | BIT_RXIE | BIT_SECIE, 0); } static void mrf24j40_stop(struct ieee802154_hw *hw) { struct mrf24j40 *devrec = hw->priv; dev_dbg(printdev(devrec), "stop\n"); /* Set TXNIE and RXIE. Disable Interrupts */ regmap_update_bits(devrec->regmap_short, REG_INTCON, BIT_TXNIE | BIT_RXIE, BIT_TXNIE | BIT_RXIE); } static int mrf24j40_set_channel(struct ieee802154_hw *hw, u8 page, u8 channel) { struct mrf24j40 *devrec = hw->priv; u8 val; int ret; dev_dbg(printdev(devrec), "Set Channel %d\n", channel); WARN_ON(page != 0); WARN_ON(channel < MRF24J40_CHAN_MIN); WARN_ON(channel > MRF24J40_CHAN_MAX); /* Set Channel TODO */ val = (channel - 11) << RFCON0_CH_SHIFT | RFOPT_RECOMMEND; ret = regmap_update_bits(devrec->regmap_long, REG_RFCON0, RFCON0_CH_MASK, val); if (ret) return ret; /* RF Reset */ ret = regmap_update_bits(devrec->regmap_short, REG_RFCTL, BIT_RFRST, BIT_RFRST); if (ret) return ret; ret = regmap_update_bits(devrec->regmap_short, REG_RFCTL, BIT_RFRST, 0); if (!ret) udelay(SET_CHANNEL_DELAY_US); /* per datasheet */ return ret; } static int mrf24j40_filter(struct ieee802154_hw *hw, struct ieee802154_hw_addr_filt *filt, unsigned long changed) { struct mrf24j40 *devrec = hw->priv; dev_dbg(printdev(devrec), "filter\n"); if (changed & IEEE802154_AFILT_SADDR_CHANGED) { /* Short Addr */ u8 addrh, addrl; addrh = le16_to_cpu(filt->short_addr) >> 8 & 0xff; addrl = le16_to_cpu(filt->short_addr) & 0xff; regmap_write(devrec->regmap_short, REG_SADRH, addrh); regmap_write(devrec->regmap_short, REG_SADRL, addrl); dev_dbg(printdev(devrec), "Set short addr to %04hx\n", filt->short_addr); } if (changed & IEEE802154_AFILT_IEEEADDR_CHANGED) { /* Device Address */ u8 i, addr[8]; memcpy(addr, &filt->ieee_addr, 8); for (i = 0; i < 8; i++) regmap_write(devrec->regmap_short, REG_EADR0 + i, addr[i]); #ifdef DEBUG pr_debug("Set long addr to: "); for (i = 0; i < 8; i++) pr_debug("%02hhx ", addr[7 - i]); pr_debug("\n"); #endif } if (changed & IEEE802154_AFILT_PANID_CHANGED) { /* PAN ID */ u8 panidl, panidh; panidh = le16_to_cpu(filt->pan_id) >> 8 & 0xff; panidl = le16_to_cpu(filt->pan_id) & 0xff; regmap_write(devrec->regmap_short, REG_PANIDH, panidh); regmap_write(devrec->regmap_short, REG_PANIDL, panidl); dev_dbg(printdev(devrec), "Set PANID to %04hx\n", filt->pan_id); } if (changed & IEEE802154_AFILT_PANC_CHANGED) { /* Pan Coordinator */ u8 val; int ret; if (filt->pan_coord) val = BIT_PANCOORD; else val = 0; ret = regmap_update_bits(devrec->regmap_short, REG_RXMCR, BIT_PANCOORD, val); if (ret) return ret; /* REG_SLOTTED is maintained as default (unslotted/CSMA-CA). * REG_ORDER is maintained as default (no beacon/superframe). 
*/ dev_dbg(printdev(devrec), "Set Pan Coord to %s\n", filt->pan_coord ? "on" : "off"); } return 0; } static void mrf24j40_handle_rx_read_buf_unlock(struct mrf24j40 *devrec) { int ret; /* Turn back on reception of packets off the air. */ devrec->rx_msg.complete = NULL; devrec->rx_buf[0] = MRF24J40_WRITESHORT(REG_BBREG1); devrec->rx_buf[1] = 0x00; /* CLR RXDECINV */ ret = spi_async(devrec->spi, &devrec->rx_msg); if (ret) dev_err(printdev(devrec), "failed to unlock rx buffer\n"); } static void mrf24j40_handle_rx_read_buf_complete(void *context) { struct mrf24j40 *devrec = context; u8 len = devrec->rx_buf[2]; u8 rx_local_buf[RX_FIFO_SIZE]; struct sk_buff *skb; memcpy(rx_local_buf, devrec->rx_fifo_buf, len); mrf24j40_handle_rx_read_buf_unlock(devrec); skb = dev_alloc_skb(IEEE802154_MTU); if (!skb) { dev_err(printdev(devrec), "failed to allocate skb\n"); return; } skb_put_data(skb, rx_local_buf, len); ieee802154_rx_irqsafe(devrec->hw, skb, 0); #ifdef DEBUG print_hex_dump(KERN_DEBUG, "mrf24j40 rx: ", DUMP_PREFIX_OFFSET, 16, 1, rx_local_buf, len, 0); pr_debug("mrf24j40 rx: lqi: %02hhx rssi: %02hhx\n", devrec->rx_lqi_buf[0], devrec->rx_lqi_buf[1]); #endif } static void mrf24j40_handle_rx_read_buf(void *context) { struct mrf24j40 *devrec = context; u16 cmd; int ret; /* if length is invalid read the full MTU */ if (!ieee802154_is_valid_psdu_len(devrec->rx_buf[2])) devrec->rx_buf[2] = IEEE802154_MTU; cmd = MRF24J40_READLONG(REG_RX_FIFO + 1); devrec->rx_addr_buf[0] = cmd >> 8 & 0xff; devrec->rx_addr_buf[1] = cmd & 0xff; devrec->rx_fifo_buf_trx.len = devrec->rx_buf[2]; ret = spi_async(devrec->spi, &devrec->rx_buf_msg); if (ret) { dev_err(printdev(devrec), "failed to read rx buffer\n"); mrf24j40_handle_rx_read_buf_unlock(devrec); } } static void mrf24j40_handle_rx_read_len(void *context) { struct mrf24j40 *devrec = context; u16 cmd; int ret; /* read the length of received frame */ devrec->rx_msg.complete = mrf24j40_handle_rx_read_buf; devrec->rx_trx.len = 3; cmd = MRF24J40_READLONG(REG_RX_FIFO); devrec->rx_buf[0] = cmd >> 8 & 0xff; devrec->rx_buf[1] = cmd & 0xff; ret = spi_async(devrec->spi, &devrec->rx_msg); if (ret) { dev_err(printdev(devrec), "failed to read rx buffer length\n"); mrf24j40_handle_rx_read_buf_unlock(devrec); } } static int mrf24j40_handle_rx(struct mrf24j40 *devrec) { /* Turn off reception of packets off the air. This prevents the * device from overwriting the buffer while we're reading it. 
*/ devrec->rx_msg.complete = mrf24j40_handle_rx_read_len; devrec->rx_trx.len = 2; devrec->rx_buf[0] = MRF24J40_WRITESHORT(REG_BBREG1); devrec->rx_buf[1] = BIT_RXDECINV; /* SET RXDECINV */ return spi_async(devrec->spi, &devrec->rx_msg); } static int mrf24j40_csma_params(struct ieee802154_hw *hw, u8 min_be, u8 max_be, u8 retries) { struct mrf24j40 *devrec = hw->priv; u8 val; /* min_be */ val = min_be << TXMCR_MIN_BE_SHIFT; /* csma backoffs */ val |= retries << TXMCR_CSMA_RETRIES_SHIFT; return regmap_update_bits(devrec->regmap_short, REG_TXMCR, TXMCR_MIN_BE_MASK | TXMCR_CSMA_RETRIES_MASK, val); } static int mrf24j40_set_cca_mode(struct ieee802154_hw *hw, const struct wpan_phy_cca *cca) { struct mrf24j40 *devrec = hw->priv; u8 val; /* mapping 802.15.4 to driver spec */ switch (cca->mode) { case NL802154_CCA_ENERGY: val = 2; break; case NL802154_CCA_CARRIER: val = 1; break; case NL802154_CCA_ENERGY_CARRIER: switch (cca->opt) { case NL802154_CCA_OPT_ENERGY_CARRIER_AND: val = 3; break; default: return -EINVAL; } break; default: return -EINVAL; } return regmap_update_bits(devrec->regmap_short, REG_BBREG2, BBREG2_CCA_MODE_MASK, val << BBREG2_CCA_MODE_SHIFT); } /* array for representing ed levels */ static const s32 mrf24j40_ed_levels[] = { -9000, -8900, -8800, -8700, -8600, -8500, -8400, -8300, -8200, -8100, -8000, -7900, -7800, -7700, -7600, -7500, -7400, -7300, -7200, -7100, -7000, -6900, -6800, -6700, -6600, -6500, -6400, -6300, -6200, -6100, -6000, -5900, -5800, -5700, -5600, -5500, -5400, -5300, -5200, -5100, -5000, -4900, -4800, -4700, -4600, -4500, -4400, -4300, -4200, -4100, -4000, -3900, -3800, -3700, -3600, -3500 }; /* map ed levels to register value */ static const s32 mrf24j40_ed_levels_map[][2] = { { -9000, 0 }, { -8900, 1 }, { -8800, 2 }, { -8700, 5 }, { -8600, 9 }, { -8500, 13 }, { -8400, 18 }, { -8300, 23 }, { -8200, 27 }, { -8100, 32 }, { -8000, 37 }, { -7900, 43 }, { -7800, 48 }, { -7700, 53 }, { -7600, 58 }, { -7500, 63 }, { -7400, 68 }, { -7300, 73 }, { -7200, 78 }, { -7100, 83 }, { -7000, 89 }, { -6900, 95 }, { -6800, 100 }, { -6700, 107 }, { -6600, 111 }, { -6500, 117 }, { -6400, 121 }, { -6300, 125 }, { -6200, 129 }, { -6100, 133 }, { -6000, 138 }, { -5900, 143 }, { -5800, 148 }, { -5700, 153 }, { -5600, 159 }, { -5500, 165 }, { -5400, 170 }, { -5300, 176 }, { -5200, 183 }, { -5100, 188 }, { -5000, 193 }, { -4900, 198 }, { -4800, 203 }, { -4700, 207 }, { -4600, 212 }, { -4500, 216 }, { -4400, 221 }, { -4300, 225 }, { -4200, 228 }, { -4100, 233 }, { -4000, 239 }, { -3900, 245 }, { -3800, 250 }, { -3700, 253 }, { -3600, 254 }, { -3500, 255 }, }; static int mrf24j40_set_cca_ed_level(struct ieee802154_hw *hw, s32 mbm) { struct mrf24j40 *devrec = hw->priv; int i; for (i = 0; i < ARRAY_SIZE(mrf24j40_ed_levels_map); i++) { if (mrf24j40_ed_levels_map[i][0] == mbm) return regmap_write(devrec->regmap_short, REG_CCAEDTH, mrf24j40_ed_levels_map[i][1]); } return -EINVAL; } static const s32 mrf24j40ma_powers[] = { 0, -50, -120, -190, -280, -370, -490, -630, -1000, -1050, -1120, -1190, -1280, -1370, -1490, -1630, -2000, -2050, -2120, -2190, -2280, -2370, -2490, -2630, -3000, -3050, -3120, -3190, -3280, -3370, -3490, -3630, }; static int mrf24j40_set_txpower(struct ieee802154_hw *hw, s32 mbm) { struct mrf24j40 *devrec = hw->priv; s32 small_scale; u8 val; if (0 >= mbm && mbm > -1000) { val = TXPWRL_0 << TXPWRL_SHIFT; small_scale = mbm; } else if (-1000 >= mbm && mbm > -2000) { val = TXPWRL_10 << TXPWRL_SHIFT; small_scale = mbm + 1000; } else if (-2000 >= mbm && mbm > -3000) { val = TXPWRL_20 
<< TXPWRL_SHIFT; small_scale = mbm + 2000; } else if (-3000 >= mbm && mbm > -4000) { val = TXPWRL_30 << TXPWRL_SHIFT; small_scale = mbm + 3000; } else { return -EINVAL; } switch (small_scale) { case 0: val |= (TXPWRS_0 << TXPWRS_SHIFT); break; case -50: val |= (TXPWRS_0_5 << TXPWRS_SHIFT); break; case -120: val |= (TXPWRS_1_2 << TXPWRS_SHIFT); break; case -190: val |= (TXPWRS_1_9 << TXPWRS_SHIFT); break; case -280: val |= (TXPWRS_2_8 << TXPWRS_SHIFT); break; case -370: val |= (TXPWRS_3_7 << TXPWRS_SHIFT); break; case -490: val |= (TXPWRS_4_9 << TXPWRS_SHIFT); break; case -630: val |= (TXPWRS_6_3 << TXPWRS_SHIFT); break; default: return -EINVAL; } return regmap_update_bits(devrec->regmap_long, REG_RFCON3, TXPWRL_MASK | TXPWRS_MASK, val); } static int mrf24j40_set_promiscuous_mode(struct ieee802154_hw *hw, bool on) { struct mrf24j40 *devrec = hw->priv; int ret; if (on) { /* set PROMI, ERRPKT and NOACKRSP */ ret = regmap_update_bits(devrec->regmap_short, REG_RXMCR, BIT_PROMI | BIT_ERRPKT | BIT_NOACKRSP, BIT_PROMI | BIT_ERRPKT | BIT_NOACKRSP); } else { /* clear PROMI, ERRPKT and NOACKRSP */ ret = regmap_update_bits(devrec->regmap_short, REG_RXMCR, BIT_PROMI | BIT_ERRPKT | BIT_NOACKRSP, 0); } return ret; } static const struct ieee802154_ops mrf24j40_ops = { .owner = THIS_MODULE, .xmit_async = mrf24j40_tx, .ed = mrf24j40_ed, .start = mrf24j40_start, .stop = mrf24j40_stop, .set_channel = mrf24j40_set_channel, .set_hw_addr_filt = mrf24j40_filter, .set_csma_params = mrf24j40_csma_params, .set_cca_mode = mrf24j40_set_cca_mode, .set_cca_ed_level = mrf24j40_set_cca_ed_level, .set_txpower = mrf24j40_set_txpower, .set_promiscuous_mode = mrf24j40_set_promiscuous_mode, }; static void mrf24j40_intstat_complete(void *context) { struct mrf24j40 *devrec = context; u8 intstat = devrec->irq_buf[1]; enable_irq(devrec->spi->irq); /* Ignore Rx security decryption */ if (intstat & BIT_SECIF) regmap_write_async(devrec->regmap_short, REG_SECCON0, BIT_SECIGNORE); /* Check for TX complete */ if (intstat & BIT_TXNIF) ieee802154_xmit_complete(devrec->hw, devrec->tx_skb, false); /* Check for Rx */ if (intstat & BIT_RXIF) mrf24j40_handle_rx(devrec); } static irqreturn_t mrf24j40_isr(int irq, void *data) { struct mrf24j40 *devrec = data; int ret; disable_irq_nosync(irq); devrec->irq_buf[0] = MRF24J40_READSHORT(REG_INTSTAT); devrec->irq_buf[1] = 0; /* Read the interrupt status */ ret = spi_async(devrec->spi, &devrec->irq_msg); if (ret) { enable_irq(irq); return IRQ_NONE; } return IRQ_HANDLED; } static int mrf24j40_hw_init(struct mrf24j40 *devrec) { u32 irq_type; int ret; /* Initialize the device. From datasheet section 3.2: Initialization. 
*/ ret = regmap_write(devrec->regmap_short, REG_SOFTRST, 0x07); if (ret) goto err_ret; ret = regmap_write(devrec->regmap_short, REG_PACON2, 0x98); if (ret) goto err_ret; ret = regmap_write(devrec->regmap_short, REG_TXSTBL, 0x95); if (ret) goto err_ret; ret = regmap_write(devrec->regmap_long, REG_RFCON0, 0x03); if (ret) goto err_ret; ret = regmap_write(devrec->regmap_long, REG_RFCON1, 0x01); if (ret) goto err_ret; ret = regmap_write(devrec->regmap_long, REG_RFCON2, 0x80); if (ret) goto err_ret; ret = regmap_write(devrec->regmap_long, REG_RFCON6, 0x90); if (ret) goto err_ret; ret = regmap_write(devrec->regmap_long, REG_RFCON7, 0x80); if (ret) goto err_ret; ret = regmap_write(devrec->regmap_long, REG_RFCON8, 0x10); if (ret) goto err_ret; ret = regmap_write(devrec->regmap_long, REG_SLPCON1, 0x21); if (ret) goto err_ret; ret = regmap_write(devrec->regmap_short, REG_BBREG2, 0x80); if (ret) goto err_ret; ret = regmap_write(devrec->regmap_short, REG_CCAEDTH, 0x60); if (ret) goto err_ret; ret = regmap_write(devrec->regmap_short, REG_BBREG6, 0x40); if (ret) goto err_ret; ret = regmap_write(devrec->regmap_short, REG_RFCTL, 0x04); if (ret) goto err_ret; ret = regmap_write(devrec->regmap_short, REG_RFCTL, 0x0); if (ret) goto err_ret; udelay(192); /* Set RX Mode. RXMCR<1:0>: 0x0 normal, 0x1 promisc, 0x2 error */ ret = regmap_update_bits(devrec->regmap_short, REG_RXMCR, 0x03, 0x00); if (ret) goto err_ret; if (spi_get_device_id(devrec->spi)->driver_data == MRF24J40MC) { /* Enable external amplifier. * From MRF24J40MC datasheet section 1.3: Operation. */ regmap_update_bits(devrec->regmap_long, REG_TESTMODE, 0x07, 0x07); /* Set GPIO3 as output. */ regmap_update_bits(devrec->regmap_short, REG_TRISGPIO, 0x08, 0x08); /* Set GPIO3 HIGH to enable U5 voltage regulator */ regmap_update_bits(devrec->regmap_short, REG_GPIO, 0x08, 0x08); /* Reduce TX pwr to meet FCC requirements. 
* From MRF24J40MC datasheet section 3.1.1 */ regmap_write(devrec->regmap_long, REG_RFCON3, 0x28); } irq_type = irq_get_trigger_type(devrec->spi->irq); if (irq_type == IRQ_TYPE_EDGE_RISING || irq_type == IRQ_TYPE_EDGE_FALLING) dev_warn(&devrec->spi->dev, "Using edge triggered irq's are not recommended, because it can cause races and result in a non-functional driver!\n"); switch (irq_type) { case IRQ_TYPE_EDGE_RISING: case IRQ_TYPE_LEVEL_HIGH: /* set interrupt polarity to rising */ ret = regmap_update_bits(devrec->regmap_long, REG_SLPCON0, BIT_INTEDGE, BIT_INTEDGE); if (ret) goto err_ret; break; default: /* default is falling edge */ break; } return 0; err_ret: return ret; } static void mrf24j40_setup_tx_spi_messages(struct mrf24j40 *devrec) { spi_message_init(&devrec->tx_msg); devrec->tx_msg.context = devrec; devrec->tx_msg.complete = write_tx_buf_complete; devrec->tx_hdr_trx.len = 2; devrec->tx_hdr_trx.tx_buf = devrec->tx_hdr_buf; spi_message_add_tail(&devrec->tx_hdr_trx, &devrec->tx_msg); devrec->tx_len_trx.len = 2; devrec->tx_len_trx.tx_buf = devrec->tx_len_buf; spi_message_add_tail(&devrec->tx_len_trx, &devrec->tx_msg); spi_message_add_tail(&devrec->tx_buf_trx, &devrec->tx_msg); spi_message_init(&devrec->tx_post_msg); devrec->tx_post_msg.context = devrec; devrec->tx_post_trx.len = 2; devrec->tx_post_trx.tx_buf = devrec->tx_post_buf; spi_message_add_tail(&devrec->tx_post_trx, &devrec->tx_post_msg); } static void mrf24j40_setup_rx_spi_messages(struct mrf24j40 *devrec) { spi_message_init(&devrec->rx_msg); devrec->rx_msg.context = devrec; devrec->rx_trx.len = 2; devrec->rx_trx.tx_buf = devrec->rx_buf; devrec->rx_trx.rx_buf = devrec->rx_buf; spi_message_add_tail(&devrec->rx_trx, &devrec->rx_msg); spi_message_init(&devrec->rx_buf_msg); devrec->rx_buf_msg.context = devrec; devrec->rx_buf_msg.complete = mrf24j40_handle_rx_read_buf_complete; devrec->rx_addr_trx.len = 2; devrec->rx_addr_trx.tx_buf = devrec->rx_addr_buf; spi_message_add_tail(&devrec->rx_addr_trx, &devrec->rx_buf_msg); devrec->rx_fifo_buf_trx.rx_buf = devrec->rx_fifo_buf; spi_message_add_tail(&devrec->rx_fifo_buf_trx, &devrec->rx_buf_msg); devrec->rx_lqi_trx.len = 2; devrec->rx_lqi_trx.rx_buf = devrec->rx_lqi_buf; spi_message_add_tail(&devrec->rx_lqi_trx, &devrec->rx_buf_msg); } static void mrf24j40_setup_irq_spi_messages(struct mrf24j40 *devrec) { spi_message_init(&devrec->irq_msg); devrec->irq_msg.context = devrec; devrec->irq_msg.complete = mrf24j40_intstat_complete; devrec->irq_trx.len = 2; devrec->irq_trx.tx_buf = devrec->irq_buf; devrec->irq_trx.rx_buf = devrec->irq_buf; spi_message_add_tail(&devrec->irq_trx, &devrec->irq_msg); } static void mrf24j40_phy_setup(struct mrf24j40 *devrec) { ieee802154_random_extended_addr(&devrec->hw->phy->perm_extended_addr); devrec->hw->phy->current_channel = 11; /* mrf24j40 supports max_minbe 0 - 3 */ devrec->hw->phy->supported.max_minbe = 3; /* datasheet doesn't say anything about max_be, but we have min_be * So we assume the max_be default. 
*/ devrec->hw->phy->supported.min_maxbe = 5; devrec->hw->phy->supported.max_maxbe = 5; devrec->hw->phy->cca.mode = NL802154_CCA_CARRIER; devrec->hw->phy->supported.cca_modes = BIT(NL802154_CCA_ENERGY) | BIT(NL802154_CCA_CARRIER) | BIT(NL802154_CCA_ENERGY_CARRIER); devrec->hw->phy->supported.cca_opts = BIT(NL802154_CCA_OPT_ENERGY_CARRIER_AND); devrec->hw->phy->cca_ed_level = -6900; devrec->hw->phy->supported.cca_ed_levels = mrf24j40_ed_levels; devrec->hw->phy->supported.cca_ed_levels_size = ARRAY_SIZE(mrf24j40_ed_levels); switch (spi_get_device_id(devrec->spi)->driver_data) { case MRF24J40: case MRF24J40MA: devrec->hw->phy->supported.tx_powers = mrf24j40ma_powers; devrec->hw->phy->supported.tx_powers_size = ARRAY_SIZE(mrf24j40ma_powers); devrec->hw->phy->flags |= WPAN_PHY_FLAG_TXPOWER; break; default: break; } } static int mrf24j40_probe(struct spi_device *spi) { int ret = -ENOMEM, irq_type; struct ieee802154_hw *hw; struct mrf24j40 *devrec; dev_info(&spi->dev, "probe(). IRQ: %d\n", spi->irq); /* Register with the 802154 subsystem */ hw = ieee802154_alloc_hw(sizeof(*devrec), &mrf24j40_ops); if (!hw) goto err_ret; devrec = hw->priv; devrec->spi = spi; spi_set_drvdata(spi, devrec); devrec->hw = hw; devrec->hw->parent = &spi->dev; devrec->hw->phy->supported.channels[0] = CHANNEL_MASK; devrec->hw->flags = IEEE802154_HW_TX_OMIT_CKSUM | IEEE802154_HW_AFILT | IEEE802154_HW_CSMA_PARAMS | IEEE802154_HW_PROMISCUOUS; devrec->hw->phy->flags = WPAN_PHY_FLAG_CCA_MODE | WPAN_PHY_FLAG_CCA_ED_LEVEL; mrf24j40_setup_tx_spi_messages(devrec); mrf24j40_setup_rx_spi_messages(devrec); mrf24j40_setup_irq_spi_messages(devrec); devrec->regmap_short = devm_regmap_init_spi(spi, &mrf24j40_short_regmap); if (IS_ERR(devrec->regmap_short)) { ret = PTR_ERR(devrec->regmap_short); dev_err(&spi->dev, "Failed to allocate short register map: %d\n", ret); goto err_register_device; } devrec->regmap_long = devm_regmap_init(&spi->dev, &mrf24j40_long_regmap_bus, spi, &mrf24j40_long_regmap); if (IS_ERR(devrec->regmap_long)) { ret = PTR_ERR(devrec->regmap_long); dev_err(&spi->dev, "Failed to allocate long register map: %d\n", ret); goto err_register_device; } if (spi->max_speed_hz > MAX_SPI_SPEED_HZ) { dev_warn(&spi->dev, "spi clock above possible maximum: %d", MAX_SPI_SPEED_HZ); ret = -EINVAL; goto err_register_device; } ret = mrf24j40_hw_init(devrec); if (ret) goto err_register_device; mrf24j40_phy_setup(devrec); /* request IRQF_TRIGGER_LOW as fallback default */ irq_type = irq_get_trigger_type(spi->irq); if (!irq_type) irq_type = IRQF_TRIGGER_LOW; ret = devm_request_irq(&spi->dev, spi->irq, mrf24j40_isr, irq_type, dev_name(&spi->dev), devrec); if (ret) { dev_err(printdev(devrec), "Unable to get IRQ"); goto err_register_device; } dev_dbg(printdev(devrec), "registered mrf24j40\n"); ret = ieee802154_register_hw(devrec->hw); if (ret) goto err_register_device; return 0; err_register_device: ieee802154_free_hw(devrec->hw); err_ret: return ret; } static void mrf24j40_remove(struct spi_device *spi) { struct mrf24j40 *devrec = spi_get_drvdata(spi); dev_dbg(printdev(devrec), "remove\n"); ieee802154_unregister_hw(devrec->hw); ieee802154_free_hw(devrec->hw); /* TODO: Will ieee802154_free_device() wait until ->xmit() is * complete? 
*/ } static const struct of_device_id mrf24j40_of_match[] = { { .compatible = "microchip,mrf24j40", .data = (void *)MRF24J40 }, { .compatible = "microchip,mrf24j40ma", .data = (void *)MRF24J40MA }, { .compatible = "microchip,mrf24j40mc", .data = (void *)MRF24J40MC }, { }, }; MODULE_DEVICE_TABLE(of, mrf24j40_of_match); static const struct spi_device_id mrf24j40_ids[] = { { "mrf24j40", MRF24J40 }, { "mrf24j40ma", MRF24J40MA }, { "mrf24j40mc", MRF24J40MC }, { }, }; MODULE_DEVICE_TABLE(spi, mrf24j40_ids); static struct spi_driver mrf24j40_driver = { .driver = { .of_match_table = mrf24j40_of_match, .name = "mrf24j40", }, .id_table = mrf24j40_ids, .probe = mrf24j40_probe, .remove = mrf24j40_remove, }; module_spi_driver(mrf24j40_driver); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Alan Ott"); MODULE_DESCRIPTION("MRF24J40 SPI 802.15.4 Controller Driver");
linux-master
drivers/net/ieee802154/mrf24j40.c
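The mrf24j40 driver above reaches two register spaces over SPI: short-address registers use a one-byte opcode with the write bit in bit 0, while long-address registers use a 16-bit opcode with bit 15 set and the write bit in bit 4, which the driver splits into two header bytes (cmd >> 8, then cmd & 0xff) before queueing the transfer. The standalone sketch below re-derives those command bytes from the same macro definitions; split_long_cmd() and the main() demo are illustrative additions, not driver code.

#include <stdint.h>
#include <stdio.h>

/* Same command encodings as the driver's macros above: short-address
 * registers use a one-byte opcode with the write bit in bit 0; long-address
 * registers use a 16-bit opcode with bit 15 set and the write bit in bit 4. */
#define MRF24J40_READSHORT(reg)  ((reg) << 1)
#define MRF24J40_WRITESHORT(reg) ((reg) << 1 | 1)
#define MRF24J40_READLONG(reg)   (1 << 15 | (reg) << 5)
#define MRF24J40_WRITELONG(reg)  (1 << 15 | (reg) << 5 | 1 << 4)

#define REG_RX_FIFO 0x300 /* long-address receive FIFO, as in the driver */
#define REG_INTSTAT 0x31  /* short-address interrupt status register */

/* The driver loads a 16-bit long command into two SPI header bytes,
 * high byte first (cmd >> 8, then cmd & 0xff). */
static void split_long_cmd(uint16_t cmd, uint8_t hdr[2])
{
	hdr[0] = cmd >> 8 & 0xff;
	hdr[1] = cmd & 0xff;
}

int main(void)
{
	uint8_t hdr[2];

	split_long_cmd(MRF24J40_READLONG(REG_RX_FIFO), hdr);
	printf("read RX FIFO header:   %02x %02x\n", hdr[0], hdr[1]);

	split_long_cmd(MRF24J40_WRITELONG(REG_RX_FIFO), hdr);
	printf("write RX FIFO header:  %02x %02x\n", hdr[0], hdr[1]);

	/* Short-address access needs only a single opcode byte. */
	printf("read INTSTAT opcode:   %02x\n",
	       (unsigned int)MRF24J40_READSHORT(REG_INTSTAT));
	return 0;
}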
// SPDX-License-Identifier: GPL-2.0-only /* * atusb.c - Driver for the ATUSB IEEE 802.15.4 dongle * * Written 2013 by Werner Almesberger <[email protected]> * * Copyright (c) 2015 - 2016 Stefan Schmidt <[email protected]> * * Based on at86rf230.c and spi_atusb.c. * at86rf230.c is * Copyright (C) 2009 Siemens AG * Written by: Dmitry Eremin-Solenikov <[email protected]> * * spi_atusb.c is * Copyright (c) 2011 Richard Sharpe <[email protected]> * Copyright (c) 2011 Stefan Schmidt <[email protected]> * Copyright (c) 2011 Werner Almesberger <[email protected]> * * USB initialization is * Copyright (c) 2013 Alexander Aring <[email protected]> * * Busware HUL support is * Copyright (c) 2017 Josef Filzmaier <[email protected]> */ #include <linux/kernel.h> #include <linux/slab.h> #include <linux/module.h> #include <linux/jiffies.h> #include <linux/usb.h> #include <linux/skbuff.h> #include <net/cfg802154.h> #include <net/mac802154.h> #include "at86rf230.h" #include "atusb.h" #define ATUSB_JEDEC_ATMEL 0x1f /* JEDEC manufacturer ID */ #define ATUSB_NUM_RX_URBS 4 /* allow for a bit of local latency */ #define ATUSB_ALLOC_DELAY_MS 100 /* delay after failed allocation */ #define ATUSB_TX_TIMEOUT_MS 200 /* on the air timeout */ struct atusb { struct ieee802154_hw *hw; struct usb_device *usb_dev; struct atusb_chip_data *data; int shutdown; /* non-zero if shutting down */ int err; /* set by first error */ /* RX variables */ struct delayed_work work; /* memory allocations */ struct usb_anchor idle_urbs; /* URBs waiting to be submitted */ struct usb_anchor rx_urbs; /* URBs waiting for reception */ /* TX variables */ struct usb_ctrlrequest tx_dr; struct urb *tx_urb; struct sk_buff *tx_skb; u8 tx_ack_seq; /* current TX ACK sequence number */ /* Firmware variable */ unsigned char fw_ver_maj; /* Firmware major version number */ unsigned char fw_ver_min; /* Firmware minor version number */ unsigned char fw_hw_type; /* Firmware hardware type */ }; struct atusb_chip_data { u16 t_channel_switch; int rssi_base_val; int (*set_channel)(struct ieee802154_hw*, u8, u8); int (*set_txpower)(struct ieee802154_hw*, s32); }; static int atusb_write_subreg(struct atusb *atusb, u8 reg, u8 mask, u8 shift, u8 value) { struct usb_device *usb_dev = atusb->usb_dev; u8 orig, tmp; int ret = 0; dev_dbg(&usb_dev->dev, "%s: 0x%02x <- 0x%02x\n", __func__, reg, value); ret = usb_control_msg_recv(usb_dev, 0, ATUSB_REG_READ, ATUSB_REQ_FROM_DEV, 0, reg, &orig, 1, 1000, GFP_KERNEL); if (ret < 0) return ret; /* Write the value only into that part of the register which is allowed * by the mask. All other bits stay as before. 
*/ tmp = orig & ~mask; tmp |= (value << shift) & mask; if (tmp != orig) ret = usb_control_msg_send(usb_dev, 0, ATUSB_REG_WRITE, ATUSB_REQ_TO_DEV, tmp, reg, NULL, 0, 1000, GFP_KERNEL); return ret; } static int atusb_read_subreg(struct atusb *lp, unsigned int addr, unsigned int mask, unsigned int shift) { int reg, ret; ret = usb_control_msg_recv(lp->usb_dev, 0, ATUSB_REG_READ, ATUSB_REQ_FROM_DEV, 0, addr, &reg, 1, 1000, GFP_KERNEL); if (ret < 0) return ret; reg = (reg & mask) >> shift; return reg; } static int atusb_get_and_clear_error(struct atusb *atusb) { int err = atusb->err; atusb->err = 0; return err; } /* ----- skb allocation ---------------------------------------------------- */ #define MAX_PSDU 127 #define MAX_RX_XFER (1 + MAX_PSDU + 2 + 1) /* PHR+PSDU+CRC+LQI */ #define SKB_ATUSB(skb) (*(struct atusb **)(skb)->cb) static void atusb_in(struct urb *urb); static int atusb_submit_rx_urb(struct atusb *atusb, struct urb *urb) { struct usb_device *usb_dev = atusb->usb_dev; struct sk_buff *skb = urb->context; int ret; if (!skb) { skb = alloc_skb(MAX_RX_XFER, GFP_KERNEL); if (!skb) { dev_warn_ratelimited(&usb_dev->dev, "atusb_in: can't allocate skb\n"); return -ENOMEM; } skb_put(skb, MAX_RX_XFER); SKB_ATUSB(skb) = atusb; } usb_fill_bulk_urb(urb, usb_dev, usb_rcvbulkpipe(usb_dev, 1), skb->data, MAX_RX_XFER, atusb_in, skb); usb_anchor_urb(urb, &atusb->rx_urbs); ret = usb_submit_urb(urb, GFP_KERNEL); if (ret) { usb_unanchor_urb(urb); kfree_skb(skb); urb->context = NULL; } return ret; } static void atusb_work_urbs(struct work_struct *work) { struct atusb *atusb = container_of(to_delayed_work(work), struct atusb, work); struct usb_device *usb_dev = atusb->usb_dev; struct urb *urb; int ret; if (atusb->shutdown) return; do { urb = usb_get_from_anchor(&atusb->idle_urbs); if (!urb) return; ret = atusb_submit_rx_urb(atusb, urb); } while (!ret); usb_anchor_urb(urb, &atusb->idle_urbs); dev_warn_ratelimited(&usb_dev->dev, "atusb_in: can't allocate/submit URB (%d)\n", ret); schedule_delayed_work(&atusb->work, msecs_to_jiffies(ATUSB_ALLOC_DELAY_MS) + 1); } /* ----- Asynchronous USB -------------------------------------------------- */ static void atusb_tx_done(struct atusb *atusb, u8 seq, int reason) { struct usb_device *usb_dev = atusb->usb_dev; u8 expect = atusb->tx_ack_seq; dev_dbg(&usb_dev->dev, "%s (0x%02x/0x%02x)\n", __func__, seq, expect); if (seq == expect) { /* TODO check for ifs handling in firmware */ if (reason == IEEE802154_SUCCESS) ieee802154_xmit_complete(atusb->hw, atusb->tx_skb, false); else ieee802154_xmit_error(atusb->hw, atusb->tx_skb, reason); } else { /* TODO I experience this case when atusb has a tx complete * irq before probing, we should fix the firmware it's an * unlikely case now that seq == expect is then true, but can * happen and fail with a tx_skb = NULL; */ ieee802154_xmit_hw_error(atusb->hw, atusb->tx_skb); } } static void atusb_in_good(struct urb *urb) { struct usb_device *usb_dev = urb->dev; struct sk_buff *skb = urb->context; struct atusb *atusb = SKB_ATUSB(skb); int result = IEEE802154_SUCCESS; u8 len, lqi, trac; if (!urb->actual_length) { dev_dbg(&usb_dev->dev, "atusb_in: zero-sized URB ?\n"); return; } len = *skb->data; switch (urb->actual_length) { case 2: trac = TRAC_MASK(*(skb->data + 1)); switch (trac) { case TRAC_SUCCESS: case TRAC_SUCCESS_DATA_PENDING: /* already IEEE802154_SUCCESS */ break; case TRAC_CHANNEL_ACCESS_FAILURE: result = IEEE802154_CHANNEL_ACCESS_FAILURE; break; case TRAC_NO_ACK: result = IEEE802154_NO_ACK; break; default: result = 
IEEE802154_SYSTEM_ERROR; } fallthrough; case 1: atusb_tx_done(atusb, len, result); return; } if (len + 1 > urb->actual_length - 1) { dev_dbg(&usb_dev->dev, "atusb_in: frame len %d+1 > URB %u-1\n", len, urb->actual_length); return; } if (!ieee802154_is_valid_psdu_len(len)) { dev_dbg(&usb_dev->dev, "atusb_in: frame corrupted\n"); return; } lqi = skb->data[len + 1]; dev_dbg(&usb_dev->dev, "atusb_in: rx len %d lqi 0x%02x\n", len, lqi); skb_pull(skb, 1); /* remove PHR */ skb_trim(skb, len); /* get payload only */ ieee802154_rx_irqsafe(atusb->hw, skb, lqi); urb->context = NULL; /* skb is gone */ } static void atusb_in(struct urb *urb) { struct usb_device *usb_dev = urb->dev; struct sk_buff *skb = urb->context; struct atusb *atusb = SKB_ATUSB(skb); dev_dbg(&usb_dev->dev, "%s: status %d len %d\n", __func__, urb->status, urb->actual_length); if (urb->status) { if (urb->status == -ENOENT) { /* being killed */ kfree_skb(skb); urb->context = NULL; return; } dev_dbg(&usb_dev->dev, "%s: URB error %d\n", __func__, urb->status); } else { atusb_in_good(urb); } usb_anchor_urb(urb, &atusb->idle_urbs); if (!atusb->shutdown) schedule_delayed_work(&atusb->work, 0); } /* ----- URB allocation/deallocation --------------------------------------- */ static void atusb_free_urbs(struct atusb *atusb) { struct urb *urb; while (1) { urb = usb_get_from_anchor(&atusb->idle_urbs); if (!urb) break; kfree_skb(urb->context); usb_free_urb(urb); } } static int atusb_alloc_urbs(struct atusb *atusb, int n) { struct urb *urb; while (n) { urb = usb_alloc_urb(0, GFP_KERNEL); if (!urb) { atusb_free_urbs(atusb); return -ENOMEM; } usb_anchor_urb(urb, &atusb->idle_urbs); usb_free_urb(urb); n--; } return 0; } /* ----- IEEE 802.15.4 interface operations -------------------------------- */ static void atusb_xmit_complete(struct urb *urb) { dev_dbg(&urb->dev->dev, "atusb_xmit urb completed"); } static int atusb_xmit(struct ieee802154_hw *hw, struct sk_buff *skb) { struct atusb *atusb = hw->priv; struct usb_device *usb_dev = atusb->usb_dev; int ret; dev_dbg(&usb_dev->dev, "%s (%d)\n", __func__, skb->len); atusb->tx_skb = skb; atusb->tx_ack_seq++; atusb->tx_dr.wIndex = cpu_to_le16(atusb->tx_ack_seq); atusb->tx_dr.wLength = cpu_to_le16(skb->len); usb_fill_control_urb(atusb->tx_urb, usb_dev, usb_sndctrlpipe(usb_dev, 0), (unsigned char *)&atusb->tx_dr, skb->data, skb->len, atusb_xmit_complete, NULL); ret = usb_submit_urb(atusb->tx_urb, GFP_ATOMIC); dev_dbg(&usb_dev->dev, "%s done (%d)\n", __func__, ret); return ret; } static int atusb_ed(struct ieee802154_hw *hw, u8 *level) { WARN_ON(!level); *level = 0xbe; return 0; } static int atusb_set_hw_addr_filt(struct ieee802154_hw *hw, struct ieee802154_hw_addr_filt *filt, unsigned long changed) { struct atusb *atusb = hw->priv; struct device *dev = &atusb->usb_dev->dev; if (changed & IEEE802154_AFILT_SADDR_CHANGED) { u16 addr = le16_to_cpu(filt->short_addr); dev_vdbg(dev, "%s called for saddr\n", __func__); usb_control_msg_send(atusb->usb_dev, 0, ATUSB_REG_WRITE, ATUSB_REQ_TO_DEV, addr, RG_SHORT_ADDR_0, NULL, 0, 1000, GFP_KERNEL); usb_control_msg_send(atusb->usb_dev, 0, ATUSB_REG_WRITE, ATUSB_REQ_TO_DEV, addr >> 8, RG_SHORT_ADDR_1, NULL, 0, 1000, GFP_KERNEL); } if (changed & IEEE802154_AFILT_PANID_CHANGED) { u16 pan = le16_to_cpu(filt->pan_id); dev_vdbg(dev, "%s called for pan id\n", __func__); usb_control_msg_send(atusb->usb_dev, 0, ATUSB_REG_WRITE, ATUSB_REQ_TO_DEV, pan, RG_PAN_ID_0, NULL, 0, 1000, GFP_KERNEL); usb_control_msg_send(atusb->usb_dev, 0, ATUSB_REG_WRITE, ATUSB_REQ_TO_DEV, pan >> 8, 
RG_PAN_ID_1, NULL, 0, 1000, GFP_KERNEL); } if (changed & IEEE802154_AFILT_IEEEADDR_CHANGED) { u8 i, addr[IEEE802154_EXTENDED_ADDR_LEN]; memcpy(addr, &filt->ieee_addr, IEEE802154_EXTENDED_ADDR_LEN); dev_vdbg(dev, "%s called for IEEE addr\n", __func__); for (i = 0; i < 8; i++) usb_control_msg_send(atusb->usb_dev, 0, ATUSB_REG_WRITE, ATUSB_REQ_TO_DEV, addr[i], RG_IEEE_ADDR_0 + i, NULL, 0, 1000, GFP_KERNEL); } if (changed & IEEE802154_AFILT_PANC_CHANGED) { dev_vdbg(dev, "%s called for panc change\n", __func__); if (filt->pan_coord) atusb_write_subreg(atusb, SR_AACK_I_AM_COORD, 1); else atusb_write_subreg(atusb, SR_AACK_I_AM_COORD, 0); } return atusb_get_and_clear_error(atusb); } static int atusb_start(struct ieee802154_hw *hw) { struct atusb *atusb = hw->priv; struct usb_device *usb_dev = atusb->usb_dev; int ret; dev_dbg(&usb_dev->dev, "%s\n", __func__); schedule_delayed_work(&atusb->work, 0); usb_control_msg_send(atusb->usb_dev, 0, ATUSB_RX_MODE, ATUSB_REQ_TO_DEV, 1, 0, NULL, 0, 1000, GFP_KERNEL); ret = atusb_get_and_clear_error(atusb); if (ret < 0) usb_kill_anchored_urbs(&atusb->idle_urbs); return ret; } static void atusb_stop(struct ieee802154_hw *hw) { struct atusb *atusb = hw->priv; struct usb_device *usb_dev = atusb->usb_dev; dev_dbg(&usb_dev->dev, "%s\n", __func__); usb_kill_anchored_urbs(&atusb->idle_urbs); usb_control_msg_send(atusb->usb_dev, 0, ATUSB_RX_MODE, ATUSB_REQ_TO_DEV, 0, 0, NULL, 0, 1000, GFP_KERNEL); atusb_get_and_clear_error(atusb); } #define ATUSB_MAX_TX_POWERS 0xF static const s32 atusb_powers[ATUSB_MAX_TX_POWERS + 1] = { 300, 280, 230, 180, 130, 70, 0, -100, -200, -300, -400, -500, -700, -900, -1200, -1700, }; static int atusb_txpower(struct ieee802154_hw *hw, s32 mbm) { struct atusb *atusb = hw->priv; if (atusb->data) return atusb->data->set_txpower(hw, mbm); else return -ENOTSUPP; } static int atusb_set_txpower(struct ieee802154_hw *hw, s32 mbm) { struct atusb *atusb = hw->priv; u32 i; for (i = 0; i < hw->phy->supported.tx_powers_size; i++) { if (hw->phy->supported.tx_powers[i] == mbm) return atusb_write_subreg(atusb, SR_TX_PWR_23X, i); } return -EINVAL; } static int hulusb_set_txpower(struct ieee802154_hw *hw, s32 mbm) { u32 i; for (i = 0; i < hw->phy->supported.tx_powers_size; i++) { if (hw->phy->supported.tx_powers[i] == mbm) return atusb_write_subreg(hw->priv, SR_TX_PWR_212, i); } return -EINVAL; } #define ATUSB_MAX_ED_LEVELS 0xF static const s32 atusb_ed_levels[ATUSB_MAX_ED_LEVELS + 1] = { -9100, -8900, -8700, -8500, -8300, -8100, -7900, -7700, -7500, -7300, -7100, -6900, -6700, -6500, -6300, -6100, }; #define AT86RF212_MAX_TX_POWERS 0x1F static const s32 at86rf212_powers[AT86RF212_MAX_TX_POWERS + 1] = { 500, 400, 300, 200, 100, 0, -100, -200, -300, -400, -500, -600, -700, -800, -900, -1000, -1100, -1200, -1300, -1400, -1500, -1600, -1700, -1800, -1900, -2000, -2100, -2200, -2300, -2400, -2500, -2600, }; #define AT86RF2XX_MAX_ED_LEVELS 0xF static const s32 at86rf212_ed_levels_100[AT86RF2XX_MAX_ED_LEVELS + 1] = { -10000, -9800, -9600, -9400, -9200, -9000, -8800, -8600, -8400, -8200, -8000, -7800, -7600, -7400, -7200, -7000, }; static const s32 at86rf212_ed_levels_98[AT86RF2XX_MAX_ED_LEVELS + 1] = { -9800, -9600, -9400, -9200, -9000, -8800, -8600, -8400, -8200, -8000, -7800, -7600, -7400, -7200, -7000, -6800, }; static int atusb_set_cca_mode(struct ieee802154_hw *hw, const struct wpan_phy_cca *cca) { struct atusb *atusb = hw->priv; u8 val; /* mapping 802.15.4 to driver spec */ switch (cca->mode) { case NL802154_CCA_ENERGY: val = 1; break; case NL802154_CCA_CARRIER: 
val = 2; break; case NL802154_CCA_ENERGY_CARRIER: switch (cca->opt) { case NL802154_CCA_OPT_ENERGY_CARRIER_AND: val = 3; break; case NL802154_CCA_OPT_ENERGY_CARRIER_OR: val = 0; break; default: return -EINVAL; } break; default: return -EINVAL; } return atusb_write_subreg(atusb, SR_CCA_MODE, val); } static int hulusb_set_cca_ed_level(struct atusb *lp, int rssi_base_val) { int cca_ed_thres; cca_ed_thres = atusb_read_subreg(lp, SR_CCA_ED_THRES); if (cca_ed_thres < 0) return cca_ed_thres; switch (rssi_base_val) { case -98: lp->hw->phy->supported.cca_ed_levels = at86rf212_ed_levels_98; lp->hw->phy->supported.cca_ed_levels_size = ARRAY_SIZE(at86rf212_ed_levels_98); lp->hw->phy->cca_ed_level = at86rf212_ed_levels_98[cca_ed_thres]; break; case -100: lp->hw->phy->supported.cca_ed_levels = at86rf212_ed_levels_100; lp->hw->phy->supported.cca_ed_levels_size = ARRAY_SIZE(at86rf212_ed_levels_100); lp->hw->phy->cca_ed_level = at86rf212_ed_levels_100[cca_ed_thres]; break; default: WARN_ON(1); } return 0; } static int atusb_set_cca_ed_level(struct ieee802154_hw *hw, s32 mbm) { struct atusb *atusb = hw->priv; u32 i; for (i = 0; i < hw->phy->supported.cca_ed_levels_size; i++) { if (hw->phy->supported.cca_ed_levels[i] == mbm) return atusb_write_subreg(atusb, SR_CCA_ED_THRES, i); } return -EINVAL; } static int atusb_channel(struct ieee802154_hw *hw, u8 page, u8 channel) { struct atusb *atusb = hw->priv; int ret = -ENOTSUPP; if (atusb->data) { ret = atusb->data->set_channel(hw, page, channel); /* @@@ ugly synchronization */ msleep(atusb->data->t_channel_switch); } return ret; } static int atusb_set_channel(struct ieee802154_hw *hw, u8 page, u8 channel) { struct atusb *atusb = hw->priv; int ret; ret = atusb_write_subreg(atusb, SR_CHANNEL, channel); if (ret < 0) return ret; return 0; } static int hulusb_set_channel(struct ieee802154_hw *hw, u8 page, u8 channel) { int rc; int rssi_base_val; struct atusb *lp = hw->priv; if (channel == 0) rc = atusb_write_subreg(lp, SR_SUB_MODE, 0); else rc = atusb_write_subreg(lp, SR_SUB_MODE, 1); if (rc < 0) return rc; if (page == 0) { rc = atusb_write_subreg(lp, SR_BPSK_QPSK, 0); rssi_base_val = -100; } else { rc = atusb_write_subreg(lp, SR_BPSK_QPSK, 1); rssi_base_val = -98; } if (rc < 0) return rc; rc = hulusb_set_cca_ed_level(lp, rssi_base_val); if (rc < 0) return rc; return atusb_write_subreg(lp, SR_CHANNEL, channel); } static int atusb_set_csma_params(struct ieee802154_hw *hw, u8 min_be, u8 max_be, u8 retries) { struct atusb *atusb = hw->priv; int ret; ret = atusb_write_subreg(atusb, SR_MIN_BE, min_be); if (ret) return ret; ret = atusb_write_subreg(atusb, SR_MAX_BE, max_be); if (ret) return ret; return atusb_write_subreg(atusb, SR_MAX_CSMA_RETRIES, retries); } static int hulusb_set_lbt(struct ieee802154_hw *hw, bool on) { struct atusb *atusb = hw->priv; return atusb_write_subreg(atusb, SR_CSMA_LBT_MODE, on); } static int atusb_set_frame_retries(struct ieee802154_hw *hw, s8 retries) { struct atusb *atusb = hw->priv; return atusb_write_subreg(atusb, SR_MAX_FRAME_RETRIES, retries); } static int atusb_set_promiscuous_mode(struct ieee802154_hw *hw, const bool on) { struct atusb *atusb = hw->priv; int ret; if (on) { ret = atusb_write_subreg(atusb, SR_AACK_DIS_ACK, 1); if (ret < 0) return ret; ret = atusb_write_subreg(atusb, SR_AACK_PROM_MODE, 1); if (ret < 0) return ret; } else { ret = atusb_write_subreg(atusb, SR_AACK_PROM_MODE, 0); if (ret < 0) return ret; ret = atusb_write_subreg(atusb, SR_AACK_DIS_ACK, 0); if (ret < 0) return ret; } return 0; } static struct atusb_chip_data 
atusb_chip_data = { .t_channel_switch = 1, .rssi_base_val = -91, .set_txpower = atusb_set_txpower, .set_channel = atusb_set_channel, }; static struct atusb_chip_data hulusb_chip_data = { .t_channel_switch = 11, .rssi_base_val = -100, .set_txpower = hulusb_set_txpower, .set_channel = hulusb_set_channel, }; static const struct ieee802154_ops atusb_ops = { .owner = THIS_MODULE, .xmit_async = atusb_xmit, .ed = atusb_ed, .set_channel = atusb_channel, .start = atusb_start, .stop = atusb_stop, .set_hw_addr_filt = atusb_set_hw_addr_filt, .set_txpower = atusb_txpower, .set_lbt = hulusb_set_lbt, .set_cca_mode = atusb_set_cca_mode, .set_cca_ed_level = atusb_set_cca_ed_level, .set_csma_params = atusb_set_csma_params, .set_frame_retries = atusb_set_frame_retries, .set_promiscuous_mode = atusb_set_promiscuous_mode, }; /* ----- Firmware and chip version information ----------------------------- */ static int atusb_get_and_show_revision(struct atusb *atusb) { struct usb_device *usb_dev = atusb->usb_dev; char *hw_name; unsigned char buffer[3]; int ret; /* Get a couple of the ATMega Firmware values */ ret = usb_control_msg_recv(atusb->usb_dev, 0, ATUSB_ID, ATUSB_REQ_FROM_DEV, 0, 0, buffer, 3, 1000, GFP_KERNEL); if (!ret) { atusb->fw_ver_maj = buffer[0]; atusb->fw_ver_min = buffer[1]; atusb->fw_hw_type = buffer[2]; switch (atusb->fw_hw_type) { case ATUSB_HW_TYPE_100813: case ATUSB_HW_TYPE_101216: case ATUSB_HW_TYPE_110131: hw_name = "ATUSB"; atusb->data = &atusb_chip_data; break; case ATUSB_HW_TYPE_RZUSB: hw_name = "RZUSB"; atusb->data = &atusb_chip_data; break; case ATUSB_HW_TYPE_HULUSB: hw_name = "HULUSB"; atusb->data = &hulusb_chip_data; break; default: hw_name = "UNKNOWN"; atusb->err = -ENOTSUPP; ret = -ENOTSUPP; break; } dev_info(&usb_dev->dev, "Firmware: major: %u, minor: %u, hardware type: %s (%d)\n", atusb->fw_ver_maj, atusb->fw_ver_min, hw_name, atusb->fw_hw_type); } if (atusb->fw_ver_maj == 0 && atusb->fw_ver_min < 2) { dev_info(&usb_dev->dev, "Firmware version (%u.%u) predates our first public release.", atusb->fw_ver_maj, atusb->fw_ver_min); dev_info(&usb_dev->dev, "Please update to version 0.2 or newer"); } return ret; } static int atusb_get_and_show_build(struct atusb *atusb) { struct usb_device *usb_dev = atusb->usb_dev; char *build; int ret; build = kmalloc(ATUSB_BUILD_SIZE + 1, GFP_KERNEL); if (!build) return -ENOMEM; ret = usb_control_msg(atusb->usb_dev, usb_rcvctrlpipe(usb_dev, 0), ATUSB_BUILD, ATUSB_REQ_FROM_DEV, 0, 0, build, ATUSB_BUILD_SIZE, 1000); if (ret >= 0) { build[ret] = 0; dev_info(&usb_dev->dev, "Firmware: build %s\n", build); } kfree(build); return ret; } static int atusb_get_and_conf_chip(struct atusb *atusb) { struct usb_device *usb_dev = atusb->usb_dev; u8 man_id_0, man_id_1, part_num, version_num; const char *chip; struct ieee802154_hw *hw = atusb->hw; int ret; ret = usb_control_msg_recv(usb_dev, 0, ATUSB_REG_READ, ATUSB_REQ_FROM_DEV, 0, RG_MAN_ID_0, &man_id_0, 1, 1000, GFP_KERNEL); if (ret < 0) return ret; ret = usb_control_msg_recv(usb_dev, 0, ATUSB_REG_READ, ATUSB_REQ_FROM_DEV, 0, RG_MAN_ID_1, &man_id_1, 1, 1000, GFP_KERNEL); if (ret < 0) return ret; ret = usb_control_msg_recv(usb_dev, 0, ATUSB_REG_READ, ATUSB_REQ_FROM_DEV, 0, RG_PART_NUM, &part_num, 1, 1000, GFP_KERNEL); if (ret < 0) return ret; ret = usb_control_msg_recv(usb_dev, 0, ATUSB_REG_READ, ATUSB_REQ_FROM_DEV, 0, RG_VERSION_NUM, &version_num, 1, 1000, GFP_KERNEL); if (ret < 0) return ret; hw->flags = IEEE802154_HW_TX_OMIT_CKSUM | IEEE802154_HW_AFILT | IEEE802154_HW_PROMISCUOUS | IEEE802154_HW_CSMA_PARAMS; 
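	/* Editor's note (annotation added for clarity, not part of the
	 * original driver): the capability flags above tell mac802154 that
	 * the transceiver appends the FCS itself on transmit
	 * (TX_OMIT_CKSUM), filters addresses in hardware (AFILT), supports
	 * promiscuous mode, and accepts CSMA parameter configuration
	 * (CSMA_PARAMS).
	 */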
hw->phy->flags = WPAN_PHY_FLAG_TXPOWER | WPAN_PHY_FLAG_CCA_ED_LEVEL | WPAN_PHY_FLAG_CCA_MODE; hw->phy->supported.cca_modes = BIT(NL802154_CCA_ENERGY) | BIT(NL802154_CCA_CARRIER) | BIT(NL802154_CCA_ENERGY_CARRIER); hw->phy->supported.cca_opts = BIT(NL802154_CCA_OPT_ENERGY_CARRIER_AND) | BIT(NL802154_CCA_OPT_ENERGY_CARRIER_OR); hw->phy->cca.mode = NL802154_CCA_ENERGY; hw->phy->current_page = 0; if ((man_id_1 << 8 | man_id_0) != ATUSB_JEDEC_ATMEL) { dev_err(&usb_dev->dev, "non-Atmel transceiver xxxx%02x%02x\n", man_id_1, man_id_0); goto fail; } switch (part_num) { case 2: chip = "AT86RF230"; atusb->hw->phy->supported.channels[0] = 0x7FFF800; atusb->hw->phy->current_channel = 11; /* reset default */ atusb->hw->phy->supported.tx_powers = atusb_powers; atusb->hw->phy->supported.tx_powers_size = ARRAY_SIZE(atusb_powers); hw->phy->supported.cca_ed_levels = atusb_ed_levels; hw->phy->supported.cca_ed_levels_size = ARRAY_SIZE(atusb_ed_levels); break; case 3: chip = "AT86RF231"; atusb->hw->phy->supported.channels[0] = 0x7FFF800; atusb->hw->phy->current_channel = 11; /* reset default */ atusb->hw->phy->supported.tx_powers = atusb_powers; atusb->hw->phy->supported.tx_powers_size = ARRAY_SIZE(atusb_powers); hw->phy->supported.cca_ed_levels = atusb_ed_levels; hw->phy->supported.cca_ed_levels_size = ARRAY_SIZE(atusb_ed_levels); break; case 7: chip = "AT86RF212"; atusb->hw->flags |= IEEE802154_HW_LBT; atusb->hw->phy->supported.channels[0] = 0x00007FF; atusb->hw->phy->supported.channels[2] = 0x00007FF; atusb->hw->phy->current_channel = 5; atusb->hw->phy->supported.lbt = NL802154_SUPPORTED_BOOL_BOTH; atusb->hw->phy->supported.tx_powers = at86rf212_powers; atusb->hw->phy->supported.tx_powers_size = ARRAY_SIZE(at86rf212_powers); atusb->hw->phy->supported.cca_ed_levels = at86rf212_ed_levels_100; atusb->hw->phy->supported.cca_ed_levels_size = ARRAY_SIZE(at86rf212_ed_levels_100); break; default: dev_err(&usb_dev->dev, "unexpected transceiver, part 0x%02x version 0x%02x\n", part_num, version_num); goto fail; } hw->phy->transmit_power = hw->phy->supported.tx_powers[0]; hw->phy->cca_ed_level = hw->phy->supported.cca_ed_levels[7]; dev_info(&usb_dev->dev, "ATUSB: %s version %d\n", chip, version_num); return 0; fail: atusb->err = -ENODEV; return -ENODEV; } static int atusb_set_extended_addr(struct atusb *atusb) { struct usb_device *usb_dev = atusb->usb_dev; unsigned char buffer[IEEE802154_EXTENDED_ADDR_LEN]; __le64 extended_addr; u64 addr; int ret; /* Firmware versions before 0.3 do not support the EUI64_READ command. * Just use a random address and be done. 
*/ if (atusb->fw_ver_maj == 0 && atusb->fw_ver_min < 3) { ieee802154_random_extended_addr(&atusb->hw->phy->perm_extended_addr); return 0; } /* Firmware is new enough so we fetch the address from EEPROM */ ret = usb_control_msg_recv(atusb->usb_dev, 0, ATUSB_EUI64_READ, ATUSB_REQ_FROM_DEV, 0, 0, buffer, IEEE802154_EXTENDED_ADDR_LEN, 1000, GFP_KERNEL); if (ret < 0) { dev_err(&usb_dev->dev, "failed to fetch extended address, random address set\n"); ieee802154_random_extended_addr(&atusb->hw->phy->perm_extended_addr); return ret; } memcpy(&extended_addr, buffer, IEEE802154_EXTENDED_ADDR_LEN); /* Check if read address is not empty and the unicast bit is set correctly */ if (!ieee802154_is_valid_extended_unicast_addr(extended_addr)) { dev_info(&usb_dev->dev, "no permanent extended address found, random address set\n"); ieee802154_random_extended_addr(&atusb->hw->phy->perm_extended_addr); } else { atusb->hw->phy->perm_extended_addr = extended_addr; addr = swab64((__force u64)atusb->hw->phy->perm_extended_addr); dev_info(&usb_dev->dev, "Read permanent extended address %8phC from device\n", &addr); } return ret; } /* ----- Setup ------------------------------------------------------------- */ static int atusb_probe(struct usb_interface *interface, const struct usb_device_id *id) { struct usb_device *usb_dev = interface_to_usbdev(interface); struct ieee802154_hw *hw; struct atusb *atusb = NULL; int ret = -ENOMEM; hw = ieee802154_alloc_hw(sizeof(struct atusb), &atusb_ops); if (!hw) return -ENOMEM; atusb = hw->priv; atusb->hw = hw; atusb->usb_dev = usb_get_dev(usb_dev); usb_set_intfdata(interface, atusb); atusb->shutdown = 0; atusb->err = 0; INIT_DELAYED_WORK(&atusb->work, atusb_work_urbs); init_usb_anchor(&atusb->idle_urbs); init_usb_anchor(&atusb->rx_urbs); if (atusb_alloc_urbs(atusb, ATUSB_NUM_RX_URBS)) goto fail; atusb->tx_dr.bRequestType = ATUSB_REQ_TO_DEV; atusb->tx_dr.bRequest = ATUSB_TX; atusb->tx_dr.wValue = cpu_to_le16(0); atusb->tx_urb = usb_alloc_urb(0, GFP_KERNEL); if (!atusb->tx_urb) goto fail; hw->parent = &usb_dev->dev; usb_control_msg_send(atusb->usb_dev, 0, ATUSB_RF_RESET, ATUSB_REQ_TO_DEV, 0, 0, NULL, 0, 1000, GFP_KERNEL); atusb_get_and_conf_chip(atusb); atusb_get_and_show_revision(atusb); atusb_get_and_show_build(atusb); atusb_set_extended_addr(atusb); if ((atusb->fw_ver_maj == 0 && atusb->fw_ver_min >= 3) || atusb->fw_ver_maj > 0) hw->flags |= IEEE802154_HW_FRAME_RETRIES; ret = atusb_get_and_clear_error(atusb); if (ret) { dev_err(&atusb->usb_dev->dev, "%s: initialization failed, error = %d\n", __func__, ret); goto fail; } ret = ieee802154_register_hw(hw); if (ret) goto fail; /* If we just powered on, we're now in P_ON and need to enter TRX_OFF * explicitly. Any resets after that will send us straight to TRX_OFF, * making the command below redundant. */ usb_control_msg_send(atusb->usb_dev, 0, ATUSB_REG_WRITE, ATUSB_REQ_TO_DEV, STATE_FORCE_TRX_OFF, RG_TRX_STATE, NULL, 0, 1000, GFP_KERNEL); msleep(1); /* reset => TRX_OFF, tTR13 = 37 us */ #if 0 /* Calculating the maximum time available to empty the frame buffer * on reception: * * According to [1], the inter-frame gap is * R * 20 * 16 us + 128 us * where R is a random number from 0 to 7. Furthermore, we have 20 bit * times (80 us at 250 kbps) of SHR of the next frame before the * transceiver begins storing data in the frame buffer. * * This yields a minimum time of 208 us between the last data of a * frame and the first data of the next frame. This time is further * reduced by interrupt latency in the atusb firmware. 
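	 *
	 * Editor's note (illustrative arithmetic only, not from the original
	 * comment): with the shortest back-off, R = 0, the gap is
	 * 0 * 20 * 16 us + 128 us = 128 us, and adding the 80 us of SHR
	 * gives the 208 us minimum quoted above; with R = 7 it stretches to
	 * 7 * 20 * 16 us + 128 us + 80 us = 2448 us.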
* * atusb currently needs about 500 us to retrieve a maximum-sized * frame. We therefore have to allow reception of a new frame to begin * while we retrieve the previous frame. * * [1] "JN-AN-1035 Calculating data rates in an IEEE 802.15.4-based * network", Jennic 2006. * http://www.jennic.com/download_file.php?supportFile=JN-AN-1035%20Calculating%20802-15-4%20Data%20Rates-1v0.pdf */ atusb_write_subreg(atusb, SR_RX_SAFE_MODE, 1); #endif usb_control_msg_send(atusb->usb_dev, 0, ATUSB_REG_WRITE, ATUSB_REQ_TO_DEV, 0xff, RG_IRQ_MASK, NULL, 0, 1000, GFP_KERNEL); ret = atusb_get_and_clear_error(atusb); if (!ret) return 0; dev_err(&atusb->usb_dev->dev, "%s: setup failed, error = %d\n", __func__, ret); ieee802154_unregister_hw(hw); fail: atusb_free_urbs(atusb); usb_kill_urb(atusb->tx_urb); usb_free_urb(atusb->tx_urb); usb_put_dev(usb_dev); ieee802154_free_hw(hw); return ret; } static void atusb_disconnect(struct usb_interface *interface) { struct atusb *atusb = usb_get_intfdata(interface); dev_dbg(&atusb->usb_dev->dev, "%s\n", __func__); atusb->shutdown = 1; cancel_delayed_work_sync(&atusb->work); usb_kill_anchored_urbs(&atusb->rx_urbs); atusb_free_urbs(atusb); usb_kill_urb(atusb->tx_urb); usb_free_urb(atusb->tx_urb); ieee802154_unregister_hw(atusb->hw); usb_put_dev(atusb->usb_dev); ieee802154_free_hw(atusb->hw); usb_set_intfdata(interface, NULL); pr_debug("%s done\n", __func__); } /* The devices we work with */ static const struct usb_device_id atusb_device_table[] = { { .match_flags = USB_DEVICE_ID_MATCH_DEVICE | USB_DEVICE_ID_MATCH_INT_INFO, .idVendor = ATUSB_VENDOR_ID, .idProduct = ATUSB_PRODUCT_ID, .bInterfaceClass = USB_CLASS_VENDOR_SPEC }, /* end with null element */ {} }; MODULE_DEVICE_TABLE(usb, atusb_device_table); static struct usb_driver atusb_driver = { .name = "atusb", .probe = atusb_probe, .disconnect = atusb_disconnect, .id_table = atusb_device_table, }; module_usb_driver(atusb_driver); MODULE_AUTHOR("Alexander Aring <[email protected]>"); MODULE_AUTHOR("Richard Sharpe <[email protected]>"); MODULE_AUTHOR("Stefan Schmidt <[email protected]>"); MODULE_AUTHOR("Werner Almesberger <[email protected]>"); MODULE_AUTHOR("Josef Filzmaier <[email protected]>"); MODULE_DESCRIPTION("ATUSB IEEE 802.15.4 Driver"); MODULE_LICENSE("GPL");
linux-master
drivers/net/ieee802154/atusb.c
// SPDX-License-Identifier: GPL-2.0-or-later /* * Analog Devices ADF7242 Low-Power IEEE 802.15.4 Transceiver * * Copyright 2009-2017 Analog Devices Inc. * * https://www.analog.com/ADF7242 */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/interrupt.h> #include <linux/delay.h> #include <linux/mutex.h> #include <linux/workqueue.h> #include <linux/spinlock.h> #include <linux/firmware.h> #include <linux/spi/spi.h> #include <linux/skbuff.h> #include <linux/of.h> #include <linux/irq.h> #include <linux/debugfs.h> #include <linux/bitops.h> #include <linux/ieee802154.h> #include <net/mac802154.h> #include <net/cfg802154.h> #define FIRMWARE "adf7242_firmware.bin" #define MAX_POLL_LOOPS 200 /* All Registers */ #define REG_EXT_CTRL 0x100 /* RW External LNA/PA and internal PA control */ #define REG_TX_FSK_TEST 0x101 /* RW TX FSK test mode configuration */ #define REG_CCA1 0x105 /* RW RSSI threshold for CCA */ #define REG_CCA2 0x106 /* RW CCA mode configuration */ #define REG_BUFFERCFG 0x107 /* RW RX_BUFFER overwrite control */ #define REG_PKT_CFG 0x108 /* RW FCS evaluation configuration */ #define REG_DELAYCFG0 0x109 /* RW RC_RX command to SFD or sync word delay */ #define REG_DELAYCFG1 0x10A /* RW RC_TX command to TX state */ #define REG_DELAYCFG2 0x10B /* RW Mac delay extension */ #define REG_SYNC_WORD0 0x10C /* RW sync word bits [7:0] of [23:0] */ #define REG_SYNC_WORD1 0x10D /* RW sync word bits [15:8] of [23:0] */ #define REG_SYNC_WORD2 0x10E /* RW sync word bits [23:16] of [23:0] */ #define REG_SYNC_CONFIG 0x10F /* RW sync word configuration */ #define REG_RC_CFG 0x13E /* RW RX / TX packet configuration */ #define REG_RC_VAR44 0x13F /* RW RESERVED */ #define REG_CH_FREQ0 0x300 /* RW Channel Frequency Settings - Low */ #define REG_CH_FREQ1 0x301 /* RW Channel Frequency Settings - Middle */ #define REG_CH_FREQ2 0x302 /* RW Channel Frequency Settings - High */ #define REG_TX_FD 0x304 /* RW TX Frequency Deviation Register */ #define REG_DM_CFG0 0x305 /* RW RX Discriminator BW Register */ #define REG_TX_M 0x306 /* RW TX Mode Register */ #define REG_RX_M 0x307 /* RW RX Mode Register */ #define REG_RRB 0x30C /* R RSSI Readback Register */ #define REG_LRB 0x30D /* R Link Quality Readback Register */ #define REG_DR0 0x30E /* RW bits [15:8] of [15:0] data rate setting */ #define REG_DR1 0x30F /* RW bits [7:0] of [15:0] data rate setting */ #define REG_PRAMPG 0x313 /* RW RESERVED */ #define REG_TXPB 0x314 /* RW TX Packet Storage Base Address */ #define REG_RXPB 0x315 /* RW RX Packet Storage Base Address */ #define REG_TMR_CFG0 0x316 /* RW Wake up Timer Conf Register - High */ #define REG_TMR_CFG1 0x317 /* RW Wake up Timer Conf Register - Low */ #define REG_TMR_RLD0 0x318 /* RW Wake up Timer Value Register - High */ #define REG_TMR_RLD1 0x319 /* RW Wake up Timer Value Register - Low */ #define REG_TMR_CTRL 0x31A /* RW Wake up Timer Timeout flag */ #define REG_PD_AUX 0x31E /* RW Battmon enable */ #define REG_GP_CFG 0x32C /* RW GPIO Configuration */ #define REG_GP_OUT 0x32D /* RW GPIO Configuration */ #define REG_GP_IN 0x32E /* R GPIO Configuration */ #define REG_SYNT 0x335 /* RW bandwidth calibration timers */ #define REG_CAL_CFG 0x33D /* RW Calibration Settings */ #define REG_PA_BIAS 0x36E /* RW PA BIAS */ #define REG_SYNT_CAL 0x371 /* RW Oscillator and Doubler Configuration */ #define REG_IIRF_CFG 0x389 /* RW BB Filter Decimation Rate */ #define REG_CDR_CFG 0x38A /* RW CDR kVCO */ #define REG_DM_CFG1 0x38B /* RW Postdemodulator Filter */ #define REG_AGCSTAT 0x38E /* R RXBB Ref Osc 
Calibration Engine Readback */ #define REG_RXCAL0 0x395 /* RW RX BB filter tuning, LSB */ #define REG_RXCAL1 0x396 /* RW RX BB filter tuning, MSB */ #define REG_RXFE_CFG 0x39B /* RW RXBB Ref Osc & RXFE Calibration */ #define REG_PA_RR 0x3A7 /* RW Set PA ramp rate */ #define REG_PA_CFG 0x3A8 /* RW PA enable */ #define REG_EXTPA_CFG 0x3A9 /* RW External PA BIAS DAC */ #define REG_EXTPA_MSC 0x3AA /* RW PA Bias Mode */ #define REG_ADC_RBK 0x3AE /* R Readback temp */ #define REG_AGC_CFG1 0x3B2 /* RW GC Parameters */ #define REG_AGC_MAX 0x3B4 /* RW Slew rate */ #define REG_AGC_CFG2 0x3B6 /* RW RSSI Parameters */ #define REG_AGC_CFG3 0x3B7 /* RW RSSI Parameters */ #define REG_AGC_CFG4 0x3B8 /* RW RSSI Parameters */ #define REG_AGC_CFG5 0x3B9 /* RW RSSI & NDEC Parameters */ #define REG_AGC_CFG6 0x3BA /* RW NDEC Parameters */ #define REG_OCL_CFG1 0x3C4 /* RW OCL System Parameters */ #define REG_IRQ1_EN0 0x3C7 /* RW Interrupt Mask set bits for IRQ1 */ #define REG_IRQ1_EN1 0x3C8 /* RW Interrupt Mask set bits for IRQ1 */ #define REG_IRQ2_EN0 0x3C9 /* RW Interrupt Mask set bits for IRQ2 */ #define REG_IRQ2_EN1 0x3CA /* RW Interrupt Mask set bits for IRQ2 */ #define REG_IRQ1_SRC0 0x3CB /* RW Interrupt Source bits for IRQ */ #define REG_IRQ1_SRC1 0x3CC /* RW Interrupt Source bits for IRQ */ #define REG_OCL_BW0 0x3D2 /* RW OCL System Parameters */ #define REG_OCL_BW1 0x3D3 /* RW OCL System Parameters */ #define REG_OCL_BW2 0x3D4 /* RW OCL System Parameters */ #define REG_OCL_BW3 0x3D5 /* RW OCL System Parameters */ #define REG_OCL_BW4 0x3D6 /* RW OCL System Parameters */ #define REG_OCL_BWS 0x3D7 /* RW OCL System Parameters */ #define REG_OCL_CFG13 0x3E0 /* RW OCL System Parameters */ #define REG_GP_DRV 0x3E3 /* RW I/O pads Configuration and bg trim */ #define REG_BM_CFG 0x3E6 /* RW Batt. 
Monitor Threshold Voltage setting */ #define REG_SFD_15_4 0x3F4 /* RW Option to set non standard SFD */ #define REG_AFC_CFG 0x3F7 /* RW AFC mode and polarity */ #define REG_AFC_KI_KP 0x3F8 /* RW AFC ki and kp */ #define REG_AFC_RANGE 0x3F9 /* RW AFC range */ #define REG_AFC_READ 0x3FA /* RW Readback frequency error */ /* REG_EXTPA_MSC */ #define PA_PWR(x) (((x) & 0xF) << 4) #define EXTPA_BIAS_SRC BIT(3) #define EXTPA_BIAS_MODE(x) (((x) & 0x7) << 0) /* REG_PA_CFG */ #define PA_BRIDGE_DBIAS(x) (((x) & 0x1F) << 0) #define PA_DBIAS_HIGH_POWER 21 #define PA_DBIAS_LOW_POWER 13 /* REG_PA_BIAS */ #define PA_BIAS_CTRL(x) (((x) & 0x1F) << 1) #define REG_PA_BIAS_DFL BIT(0) #define PA_BIAS_HIGH_POWER 63 #define PA_BIAS_LOW_POWER 55 #define REG_PAN_ID0 0x112 #define REG_PAN_ID1 0x113 #define REG_SHORT_ADDR_0 0x114 #define REG_SHORT_ADDR_1 0x115 #define REG_IEEE_ADDR_0 0x116 #define REG_IEEE_ADDR_1 0x117 #define REG_IEEE_ADDR_2 0x118 #define REG_IEEE_ADDR_3 0x119 #define REG_IEEE_ADDR_4 0x11A #define REG_IEEE_ADDR_5 0x11B #define REG_IEEE_ADDR_6 0x11C #define REG_IEEE_ADDR_7 0x11D #define REG_FFILT_CFG 0x11E #define REG_AUTO_CFG 0x11F #define REG_AUTO_TX1 0x120 #define REG_AUTO_TX2 0x121 #define REG_AUTO_STATUS 0x122 /* REG_FFILT_CFG */ #define ACCEPT_BEACON_FRAMES BIT(0) #define ACCEPT_DATA_FRAMES BIT(1) #define ACCEPT_ACK_FRAMES BIT(2) #define ACCEPT_MACCMD_FRAMES BIT(3) #define ACCEPT_RESERVED_FRAMES BIT(4) #define ACCEPT_ALL_ADDRESS BIT(5) /* REG_AUTO_CFG */ #define AUTO_ACK_FRAMEPEND BIT(0) #define IS_PANCOORD BIT(1) #define RX_AUTO_ACK_EN BIT(3) #define CSMA_CA_RX_TURNAROUND BIT(4) /* REG_AUTO_TX1 */ #define MAX_FRAME_RETRIES(x) ((x) & 0xF) #define MAX_CCA_RETRIES(x) (((x) & 0x7) << 4) /* REG_AUTO_TX2 */ #define CSMA_MAX_BE(x) ((x) & 0xF) #define CSMA_MIN_BE(x) (((x) & 0xF) << 4) #define CMD_SPI_NOP 0xFF /* No operation. 
Use for dummy writes */ #define CMD_SPI_PKT_WR 0x10 /* Write telegram to the Packet RAM * starting from the TX packet base address * pointer tx_packet_base */ #define CMD_SPI_PKT_RD 0x30 /* Read telegram from the Packet RAM * starting from RX packet base address * pointer rxpb.rx_packet_base */ #define CMD_SPI_MEM_WR(x) (0x18 + (x >> 8)) /* Write data to MCR or * Packet RAM sequentially */ #define CMD_SPI_MEM_RD(x) (0x38 + (x >> 8)) /* Read data from MCR or * Packet RAM sequentially */ #define CMD_SPI_MEMR_WR(x) (0x08 + (x >> 8)) /* Write data to MCR or Packet * RAM as random block */ #define CMD_SPI_MEMR_RD(x) (0x28 + (x >> 8)) /* Read data from MCR or * Packet RAM random block */ #define CMD_SPI_PRAM_WR 0x1E /* Write data sequentially to current * PRAM page selected */ #define CMD_SPI_PRAM_RD 0x3E /* Read data sequentially from current * PRAM page selected */ #define CMD_RC_SLEEP 0xB1 /* Invoke transition of radio controller * into SLEEP state */ #define CMD_RC_IDLE 0xB2 /* Invoke transition of radio controller * into IDLE state */ #define CMD_RC_PHY_RDY 0xB3 /* Invoke transition of radio controller * into PHY_RDY state */ #define CMD_RC_RX 0xB4 /* Invoke transition of radio controller * into RX state */ #define CMD_RC_TX 0xB5 /* Invoke transition of radio controller * into TX state */ #define CMD_RC_MEAS 0xB6 /* Invoke transition of radio controller * into MEAS state */ #define CMD_RC_CCA 0xB7 /* Invoke Clear channel assessment */ #define CMD_RC_CSMACA 0xC1 /* initiates CSMA-CA channel access * sequence and frame transmission */ #define CMD_RC_PC_RESET 0xC7 /* Program counter reset */ #define CMD_RC_RESET 0xC8 /* Resets the ADF7242 and puts it in * the sleep state */ #define CMD_RC_PC_RESET_NO_WAIT (CMD_RC_PC_RESET | BIT(31)) /* STATUS */ #define STAT_SPI_READY BIT(7) #define STAT_IRQ_STATUS BIT(6) #define STAT_RC_READY BIT(5) #define STAT_CCA_RESULT BIT(4) #define RC_STATUS_IDLE 1 #define RC_STATUS_MEAS 2 #define RC_STATUS_PHY_RDY 3 #define RC_STATUS_RX 4 #define RC_STATUS_TX 5 #define RC_STATUS_MASK 0xF /* AUTO_STATUS */ #define SUCCESS 0 #define SUCCESS_DATPEND 1 #define FAILURE_CSMACA 2 #define FAILURE_NOACK 3 #define AUTO_STATUS_MASK 0x3 #define PRAM_PAGESIZE 256 /* IRQ1 */ #define IRQ_CCA_COMPLETE BIT(0) #define IRQ_SFD_RX BIT(1) #define IRQ_SFD_TX BIT(2) #define IRQ_RX_PKT_RCVD BIT(3) #define IRQ_TX_PKT_SENT BIT(4) #define IRQ_FRAME_VALID BIT(5) #define IRQ_ADDRESS_VALID BIT(6) #define IRQ_CSMA_CA BIT(7) #define AUTO_TX_TURNAROUND BIT(3) #define ADDON_EN BIT(4) #define FLAG_XMIT 0 #define FLAG_START 1 #define ADF7242_REPORT_CSMA_CA_STAT 0 /* framework doesn't handle yet */ struct adf7242_local { struct spi_device *spi; struct completion tx_complete; struct ieee802154_hw *hw; struct mutex bmux; /* protect SPI messages */ struct spi_message stat_msg; struct spi_transfer stat_xfer; struct dentry *debugfs_root; struct delayed_work work; struct workqueue_struct *wqueue; unsigned long flags; int tx_stat; bool promiscuous; s8 rssi; u8 max_frame_retries; u8 max_cca_retries; u8 max_be; u8 min_be; /* DMA (thus cache coherency maintenance) requires the * transfer buffers to live in their own cache lines. 
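	 *
	 * Editor's note (explanatory assumption, not part of the original
	 * comment): aligning the first buffer with ____cacheline_aligned
	 * starts the whole group of SPI buffers on a fresh cache line, so
	 * none of them share a line with the non-DMA fields declared above.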
*/ u8 buf[3] ____cacheline_aligned; u8 buf_reg_tx[3]; u8 buf_read_tx[4]; u8 buf_read_rx[4]; u8 buf_stat_rx; u8 buf_stat_tx; u8 buf_cmd; }; static int adf7242_soft_reset(struct adf7242_local *lp, int line); static int adf7242_status(struct adf7242_local *lp, u8 *stat) { int status; mutex_lock(&lp->bmux); status = spi_sync(lp->spi, &lp->stat_msg); *stat = lp->buf_stat_rx; mutex_unlock(&lp->bmux); return status; } static int adf7242_wait_status(struct adf7242_local *lp, unsigned int status, unsigned int mask, int line) { int cnt = 0, ret = 0; u8 stat; do { adf7242_status(lp, &stat); cnt++; } while (((stat & mask) != status) && (cnt < MAX_POLL_LOOPS)); if (cnt >= MAX_POLL_LOOPS) { ret = -ETIMEDOUT; if (!(stat & STAT_RC_READY)) { adf7242_soft_reset(lp, line); adf7242_status(lp, &stat); if ((stat & mask) == status) ret = 0; } if (ret < 0) dev_warn(&lp->spi->dev, "%s:line %d Timeout status 0x%x (%d)\n", __func__, line, stat, cnt); } dev_vdbg(&lp->spi->dev, "%s : loops=%d line %d\n", __func__, cnt, line); return ret; } static int adf7242_wait_rc_ready(struct adf7242_local *lp, int line) { return adf7242_wait_status(lp, STAT_RC_READY | STAT_SPI_READY, STAT_RC_READY | STAT_SPI_READY, line); } static int adf7242_wait_spi_ready(struct adf7242_local *lp, int line) { return adf7242_wait_status(lp, STAT_SPI_READY, STAT_SPI_READY, line); } static int adf7242_write_fbuf(struct adf7242_local *lp, u8 *data, u8 len) { u8 *buf = lp->buf; int status; struct spi_message msg; struct spi_transfer xfer_head = { .len = 2, .tx_buf = buf, }; struct spi_transfer xfer_buf = { .len = len, .tx_buf = data, }; spi_message_init(&msg); spi_message_add_tail(&xfer_head, &msg); spi_message_add_tail(&xfer_buf, &msg); adf7242_wait_spi_ready(lp, __LINE__); mutex_lock(&lp->bmux); buf[0] = CMD_SPI_PKT_WR; buf[1] = len + 2; status = spi_sync(lp->spi, &msg); mutex_unlock(&lp->bmux); return status; } static int adf7242_read_fbuf(struct adf7242_local *lp, u8 *data, size_t len, bool packet_read) { u8 *buf = lp->buf; int status; struct spi_message msg; struct spi_transfer xfer_head = { .len = 3, .tx_buf = buf, .rx_buf = buf, }; struct spi_transfer xfer_buf = { .len = len, .rx_buf = data, }; spi_message_init(&msg); spi_message_add_tail(&xfer_head, &msg); spi_message_add_tail(&xfer_buf, &msg); adf7242_wait_spi_ready(lp, __LINE__); mutex_lock(&lp->bmux); if (packet_read) { buf[0] = CMD_SPI_PKT_RD; buf[1] = CMD_SPI_NOP; buf[2] = 0; /* PHR */ } else { buf[0] = CMD_SPI_PRAM_RD; buf[1] = 0; buf[2] = CMD_SPI_NOP; } status = spi_sync(lp->spi, &msg); mutex_unlock(&lp->bmux); return status; } static int adf7242_read_reg(struct adf7242_local *lp, u16 addr, u8 *data) { int status; struct spi_message msg; struct spi_transfer xfer = { .len = 4, .tx_buf = lp->buf_read_tx, .rx_buf = lp->buf_read_rx, }; adf7242_wait_spi_ready(lp, __LINE__); mutex_lock(&lp->bmux); lp->buf_read_tx[0] = CMD_SPI_MEM_RD(addr); lp->buf_read_tx[1] = addr; lp->buf_read_tx[2] = CMD_SPI_NOP; lp->buf_read_tx[3] = CMD_SPI_NOP; spi_message_init(&msg); spi_message_add_tail(&xfer, &msg); status = spi_sync(lp->spi, &msg); if (msg.status) status = msg.status; if (!status) *data = lp->buf_read_rx[3]; mutex_unlock(&lp->bmux); dev_vdbg(&lp->spi->dev, "%s : REG 0x%X, VAL 0x%X\n", __func__, addr, *data); return status; } static int adf7242_write_reg(struct adf7242_local *lp, u16 addr, u8 data) { int status; adf7242_wait_spi_ready(lp, __LINE__); mutex_lock(&lp->bmux); lp->buf_reg_tx[0] = CMD_SPI_MEM_WR(addr); lp->buf_reg_tx[1] = addr; lp->buf_reg_tx[2] = data; status = spi_write(lp->spi, 
lp->buf_reg_tx, 3); mutex_unlock(&lp->bmux); dev_vdbg(&lp->spi->dev, "%s : REG 0x%X, VAL 0x%X\n", __func__, addr, data); return status; } static int adf7242_cmd(struct adf7242_local *lp, unsigned int cmd) { int status; dev_vdbg(&lp->spi->dev, "%s : CMD=0x%X\n", __func__, cmd); if (cmd != CMD_RC_PC_RESET_NO_WAIT) adf7242_wait_rc_ready(lp, __LINE__); mutex_lock(&lp->bmux); lp->buf_cmd = cmd; status = spi_write(lp->spi, &lp->buf_cmd, 1); mutex_unlock(&lp->bmux); return status; } static int adf7242_upload_firmware(struct adf7242_local *lp, u8 *data, u16 len) { struct spi_message msg; struct spi_transfer xfer_buf = { }; int status, i, page = 0; u8 *buf = lp->buf; struct spi_transfer xfer_head = { .len = 2, .tx_buf = buf, }; buf[0] = CMD_SPI_PRAM_WR; buf[1] = 0; spi_message_init(&msg); spi_message_add_tail(&xfer_head, &msg); spi_message_add_tail(&xfer_buf, &msg); for (i = len; i >= 0; i -= PRAM_PAGESIZE) { adf7242_write_reg(lp, REG_PRAMPG, page); xfer_buf.len = (i >= PRAM_PAGESIZE) ? PRAM_PAGESIZE : i; xfer_buf.tx_buf = &data[page * PRAM_PAGESIZE]; mutex_lock(&lp->bmux); status = spi_sync(lp->spi, &msg); mutex_unlock(&lp->bmux); page++; } return status; } static int adf7242_verify_firmware(struct adf7242_local *lp, const u8 *data, size_t len) { #ifdef DEBUG int i, j; unsigned int page; u8 *buf = kmalloc(PRAM_PAGESIZE, GFP_KERNEL); if (!buf) return -ENOMEM; for (page = 0, i = len; i >= 0; i -= PRAM_PAGESIZE, page++) { size_t nb = (i >= PRAM_PAGESIZE) ? PRAM_PAGESIZE : i; adf7242_write_reg(lp, REG_PRAMPG, page); adf7242_read_fbuf(lp, buf, nb, false); for (j = 0; j < nb; j++) { if (buf[j] != data[page * PRAM_PAGESIZE + j]) { kfree(buf); return -EIO; } } } kfree(buf); #endif return 0; } static void adf7242_clear_irqstat(struct adf7242_local *lp) { adf7242_write_reg(lp, REG_IRQ1_SRC1, IRQ_CCA_COMPLETE | IRQ_SFD_RX | IRQ_SFD_TX | IRQ_RX_PKT_RCVD | IRQ_TX_PKT_SENT | IRQ_FRAME_VALID | IRQ_ADDRESS_VALID | IRQ_CSMA_CA); } static int adf7242_cmd_rx(struct adf7242_local *lp) { /* Wait until the ACK is sent */ adf7242_wait_status(lp, RC_STATUS_PHY_RDY, RC_STATUS_MASK, __LINE__); adf7242_clear_irqstat(lp); mod_delayed_work(lp->wqueue, &lp->work, msecs_to_jiffies(400)); return adf7242_cmd(lp, CMD_RC_RX); } static void adf7242_rx_cal_work(struct work_struct *work) { struct adf7242_local *lp = container_of(work, struct adf7242_local, work.work); /* Reissuing RC_RX every 400ms - to adjust for offset * drift in receiver (datasheet page 61, OCL section) */ if (!test_bit(FLAG_XMIT, &lp->flags)) { adf7242_cmd(lp, CMD_RC_PHY_RDY); adf7242_cmd_rx(lp); } } static int adf7242_set_txpower(struct ieee802154_hw *hw, int mbm) { struct adf7242_local *lp = hw->priv; u8 pwr, bias_ctrl, dbias, tmp; int db = mbm / 100; dev_vdbg(&lp->spi->dev, "%s : Power %d dB\n", __func__, db); if (db > 5 || db < -26) return -EINVAL; db = DIV_ROUND_CLOSEST(db + 29, 2); if (db > 15) { dbias = PA_DBIAS_HIGH_POWER; bias_ctrl = PA_BIAS_HIGH_POWER; } else { dbias = PA_DBIAS_LOW_POWER; bias_ctrl = PA_BIAS_LOW_POWER; } pwr = clamp_t(u8, db, 3, 15); adf7242_read_reg(lp, REG_PA_CFG, &tmp); tmp &= ~PA_BRIDGE_DBIAS(~0); tmp |= PA_BRIDGE_DBIAS(dbias); adf7242_write_reg(lp, REG_PA_CFG, tmp); adf7242_read_reg(lp, REG_PA_BIAS, &tmp); tmp &= ~PA_BIAS_CTRL(~0); tmp |= PA_BIAS_CTRL(bias_ctrl); adf7242_write_reg(lp, REG_PA_BIAS, tmp); adf7242_read_reg(lp, REG_EXTPA_MSC, &tmp); tmp &= ~PA_PWR(~0); tmp |= PA_PWR(pwr); return adf7242_write_reg(lp, REG_EXTPA_MSC, tmp); } static int adf7242_set_csma_params(struct ieee802154_hw *hw, u8 min_be, u8 max_be, u8 retries) { 
struct adf7242_local *lp = hw->priv; int ret; dev_vdbg(&lp->spi->dev, "%s : min_be=%d max_be=%d retries=%d\n", __func__, min_be, max_be, retries); if (min_be > max_be || max_be > 8 || retries > 5) return -EINVAL; ret = adf7242_write_reg(lp, REG_AUTO_TX1, MAX_FRAME_RETRIES(lp->max_frame_retries) | MAX_CCA_RETRIES(retries)); if (ret) return ret; lp->max_cca_retries = retries; lp->max_be = max_be; lp->min_be = min_be; return adf7242_write_reg(lp, REG_AUTO_TX2, CSMA_MAX_BE(max_be) | CSMA_MIN_BE(min_be)); } static int adf7242_set_frame_retries(struct ieee802154_hw *hw, s8 retries) { struct adf7242_local *lp = hw->priv; int ret = 0; dev_vdbg(&lp->spi->dev, "%s : Retries = %d\n", __func__, retries); if (retries < -1 || retries > 15) return -EINVAL; if (retries >= 0) ret = adf7242_write_reg(lp, REG_AUTO_TX1, MAX_FRAME_RETRIES(retries) | MAX_CCA_RETRIES(lp->max_cca_retries)); lp->max_frame_retries = retries; return ret; } static int adf7242_ed(struct ieee802154_hw *hw, u8 *level) { struct adf7242_local *lp = hw->priv; *level = lp->rssi; dev_vdbg(&lp->spi->dev, "%s :Exit level=%d\n", __func__, *level); return 0; } static int adf7242_start(struct ieee802154_hw *hw) { struct adf7242_local *lp = hw->priv; adf7242_cmd(lp, CMD_RC_PHY_RDY); adf7242_clear_irqstat(lp); enable_irq(lp->spi->irq); set_bit(FLAG_START, &lp->flags); return adf7242_cmd_rx(lp); } static void adf7242_stop(struct ieee802154_hw *hw) { struct adf7242_local *lp = hw->priv; disable_irq(lp->spi->irq); cancel_delayed_work_sync(&lp->work); adf7242_cmd(lp, CMD_RC_IDLE); clear_bit(FLAG_START, &lp->flags); adf7242_clear_irqstat(lp); } static int adf7242_channel(struct ieee802154_hw *hw, u8 page, u8 channel) { struct adf7242_local *lp = hw->priv; unsigned long freq; dev_dbg(&lp->spi->dev, "%s :Channel=%d\n", __func__, channel); might_sleep(); WARN_ON(page != 0); WARN_ON(channel < 11); WARN_ON(channel > 26); freq = (2405 + 5 * (channel - 11)) * 100; adf7242_cmd(lp, CMD_RC_PHY_RDY); adf7242_write_reg(lp, REG_CH_FREQ0, freq); adf7242_write_reg(lp, REG_CH_FREQ1, freq >> 8); adf7242_write_reg(lp, REG_CH_FREQ2, freq >> 16); if (test_bit(FLAG_START, &lp->flags)) return adf7242_cmd_rx(lp); else return adf7242_cmd(lp, CMD_RC_PHY_RDY); } static int adf7242_set_hw_addr_filt(struct ieee802154_hw *hw, struct ieee802154_hw_addr_filt *filt, unsigned long changed) { struct adf7242_local *lp = hw->priv; u8 reg; dev_dbg(&lp->spi->dev, "%s :Changed=0x%lX\n", __func__, changed); might_sleep(); if (changed & IEEE802154_AFILT_IEEEADDR_CHANGED) { u8 addr[8], i; memcpy(addr, &filt->ieee_addr, 8); for (i = 0; i < 8; i++) adf7242_write_reg(lp, REG_IEEE_ADDR_0 + i, addr[i]); } if (changed & IEEE802154_AFILT_SADDR_CHANGED) { u16 saddr = le16_to_cpu(filt->short_addr); adf7242_write_reg(lp, REG_SHORT_ADDR_0, saddr); adf7242_write_reg(lp, REG_SHORT_ADDR_1, saddr >> 8); } if (changed & IEEE802154_AFILT_PANID_CHANGED) { u16 pan_id = le16_to_cpu(filt->pan_id); adf7242_write_reg(lp, REG_PAN_ID0, pan_id); adf7242_write_reg(lp, REG_PAN_ID1, pan_id >> 8); } if (changed & IEEE802154_AFILT_PANC_CHANGED) { adf7242_read_reg(lp, REG_AUTO_CFG, &reg); if (filt->pan_coord) reg |= IS_PANCOORD; else reg &= ~IS_PANCOORD; adf7242_write_reg(lp, REG_AUTO_CFG, reg); } return 0; } static int adf7242_set_promiscuous_mode(struct ieee802154_hw *hw, bool on) { struct adf7242_local *lp = hw->priv; dev_dbg(&lp->spi->dev, "%s : mode %d\n", __func__, on); lp->promiscuous = on; if (on) { adf7242_write_reg(lp, REG_AUTO_CFG, 0); return adf7242_write_reg(lp, REG_FFILT_CFG, ACCEPT_BEACON_FRAMES | 
ACCEPT_DATA_FRAMES | ACCEPT_MACCMD_FRAMES | ACCEPT_ALL_ADDRESS | ACCEPT_ACK_FRAMES | ACCEPT_RESERVED_FRAMES); } else { adf7242_write_reg(lp, REG_FFILT_CFG, ACCEPT_BEACON_FRAMES | ACCEPT_DATA_FRAMES | ACCEPT_MACCMD_FRAMES | ACCEPT_RESERVED_FRAMES); return adf7242_write_reg(lp, REG_AUTO_CFG, RX_AUTO_ACK_EN); } } static int adf7242_set_cca_ed_level(struct ieee802154_hw *hw, s32 mbm) { struct adf7242_local *lp = hw->priv; s8 level = clamp_t(s8, mbm / 100, S8_MIN, S8_MAX); dev_dbg(&lp->spi->dev, "%s : level %d\n", __func__, level); return adf7242_write_reg(lp, REG_CCA1, level); } static int adf7242_xmit(struct ieee802154_hw *hw, struct sk_buff *skb) { struct adf7242_local *lp = hw->priv; int ret; /* ensure existing instances of the IRQ handler have completed */ disable_irq(lp->spi->irq); set_bit(FLAG_XMIT, &lp->flags); cancel_delayed_work_sync(&lp->work); reinit_completion(&lp->tx_complete); adf7242_cmd(lp, CMD_RC_PHY_RDY); adf7242_clear_irqstat(lp); ret = adf7242_write_fbuf(lp, skb->data, skb->len); if (ret) goto err; ret = adf7242_cmd(lp, CMD_RC_CSMACA); if (ret) goto err; enable_irq(lp->spi->irq); ret = wait_for_completion_interruptible_timeout(&lp->tx_complete, HZ / 10); if (ret < 0) goto err; if (ret == 0) { dev_dbg(&lp->spi->dev, "Timeout waiting for TX interrupt\n"); ret = -ETIMEDOUT; goto err; } if (lp->tx_stat != SUCCESS) { dev_dbg(&lp->spi->dev, "Error xmit: Retry count exceeded Status=0x%x\n", lp->tx_stat); ret = -ECOMM; } else { ret = 0; } err: clear_bit(FLAG_XMIT, &lp->flags); adf7242_cmd_rx(lp); return ret; } static int adf7242_rx(struct adf7242_local *lp) { struct sk_buff *skb; size_t len; int ret; u8 lqi, len_u8, *data; ret = adf7242_read_reg(lp, 0, &len_u8); if (ret) return ret; len = len_u8; if (!ieee802154_is_valid_psdu_len(len)) { dev_dbg(&lp->spi->dev, "corrupted frame received len %d\n", (int)len); len = IEEE802154_MTU; } skb = dev_alloc_skb(len); if (!skb) { adf7242_cmd_rx(lp); return -ENOMEM; } data = skb_put(skb, len); ret = adf7242_read_fbuf(lp, data, len, true); if (ret < 0) { kfree_skb(skb); adf7242_cmd_rx(lp); return ret; } lqi = data[len - 2]; lp->rssi = data[len - 1]; ret = adf7242_cmd_rx(lp); skb_trim(skb, len - 2); /* Don't put RSSI/LQI or CRC into the frame */ ieee802154_rx_irqsafe(lp->hw, skb, lqi); dev_dbg(&lp->spi->dev, "%s: ret=%d len=%d lqi=%d rssi=%d\n", __func__, ret, (int)len, (int)lqi, lp->rssi); return ret; } static const struct ieee802154_ops adf7242_ops = { .owner = THIS_MODULE, .xmit_sync = adf7242_xmit, .ed = adf7242_ed, .set_channel = adf7242_channel, .set_hw_addr_filt = adf7242_set_hw_addr_filt, .start = adf7242_start, .stop = adf7242_stop, .set_csma_params = adf7242_set_csma_params, .set_frame_retries = adf7242_set_frame_retries, .set_txpower = adf7242_set_txpower, .set_promiscuous_mode = adf7242_set_promiscuous_mode, .set_cca_ed_level = adf7242_set_cca_ed_level, }; static void adf7242_debug(struct adf7242_local *lp, u8 irq1) { #ifdef DEBUG u8 stat; adf7242_status(lp, &stat); dev_dbg(&lp->spi->dev, "%s IRQ1 = %X:\n%s%s%s%s%s%s%s%s\n", __func__, irq1, irq1 & IRQ_CCA_COMPLETE ? "IRQ_CCA_COMPLETE\n" : "", irq1 & IRQ_SFD_RX ? "IRQ_SFD_RX\n" : "", irq1 & IRQ_SFD_TX ? "IRQ_SFD_TX\n" : "", irq1 & IRQ_RX_PKT_RCVD ? "IRQ_RX_PKT_RCVD\n" : "", irq1 & IRQ_TX_PKT_SENT ? "IRQ_TX_PKT_SENT\n" : "", irq1 & IRQ_CSMA_CA ? "IRQ_CSMA_CA\n" : "", irq1 & IRQ_FRAME_VALID ? "IRQ_FRAME_VALID\n" : "", irq1 & IRQ_ADDRESS_VALID ? "IRQ_ADDRESS_VALID\n" : ""); dev_dbg(&lp->spi->dev, "%s STATUS = %X:\n%s\n%s\n%s\n%s\n%s%s%s%s%s\n", __func__, stat, stat & STAT_SPI_READY ? 
"SPI_READY" : "SPI_BUSY", stat & STAT_IRQ_STATUS ? "IRQ_PENDING" : "IRQ_CLEAR", stat & STAT_RC_READY ? "RC_READY" : "RC_BUSY", stat & STAT_CCA_RESULT ? "CHAN_IDLE" : "CHAN_BUSY", (stat & 0xf) == RC_STATUS_IDLE ? "RC_STATUS_IDLE" : "", (stat & 0xf) == RC_STATUS_MEAS ? "RC_STATUS_MEAS" : "", (stat & 0xf) == RC_STATUS_PHY_RDY ? "RC_STATUS_PHY_RDY" : "", (stat & 0xf) == RC_STATUS_RX ? "RC_STATUS_RX" : "", (stat & 0xf) == RC_STATUS_TX ? "RC_STATUS_TX" : ""); #endif } static irqreturn_t adf7242_isr(int irq, void *data) { struct adf7242_local *lp = data; unsigned int xmit; u8 irq1; mod_delayed_work(lp->wqueue, &lp->work, msecs_to_jiffies(400)); adf7242_read_reg(lp, REG_IRQ1_SRC1, &irq1); if (!(irq1 & (IRQ_RX_PKT_RCVD | IRQ_CSMA_CA))) dev_err(&lp->spi->dev, "%s :ERROR IRQ1 = 0x%X\n", __func__, irq1); adf7242_debug(lp, irq1); xmit = test_bit(FLAG_XMIT, &lp->flags); if (xmit && (irq1 & IRQ_CSMA_CA)) { adf7242_wait_status(lp, RC_STATUS_PHY_RDY, RC_STATUS_MASK, __LINE__); if (ADF7242_REPORT_CSMA_CA_STAT) { u8 astat; adf7242_read_reg(lp, REG_AUTO_STATUS, &astat); astat &= AUTO_STATUS_MASK; dev_dbg(&lp->spi->dev, "AUTO_STATUS = %X:\n%s%s%s%s\n", astat, astat == SUCCESS ? "SUCCESS" : "", astat == SUCCESS_DATPEND ? "SUCCESS_DATPEND" : "", astat == FAILURE_CSMACA ? "FAILURE_CSMACA" : "", astat == FAILURE_NOACK ? "FAILURE_NOACK" : ""); /* save CSMA-CA completion status */ lp->tx_stat = astat; } else { lp->tx_stat = SUCCESS; } complete(&lp->tx_complete); adf7242_clear_irqstat(lp); } else if (!xmit && (irq1 & IRQ_RX_PKT_RCVD) && (irq1 & IRQ_FRAME_VALID)) { adf7242_rx(lp); } else if (!xmit && test_bit(FLAG_START, &lp->flags)) { /* Invalid packet received - drop it and restart */ dev_dbg(&lp->spi->dev, "%s:%d : ERROR IRQ1 = 0x%X\n", __func__, __LINE__, irq1); adf7242_cmd(lp, CMD_RC_PHY_RDY); adf7242_cmd_rx(lp); } else { /* This can only be xmit without IRQ, likely a RX packet. * we get an TX IRQ shortly - do nothing or let the xmit * timeout handle this */ dev_dbg(&lp->spi->dev, "%s:%d : ERROR IRQ1 = 0x%X, xmit %d\n", __func__, __LINE__, irq1, xmit); adf7242_wait_status(lp, RC_STATUS_PHY_RDY, RC_STATUS_MASK, __LINE__); complete(&lp->tx_complete); adf7242_clear_irqstat(lp); } return IRQ_HANDLED; } static int adf7242_soft_reset(struct adf7242_local *lp, int line) { dev_warn(&lp->spi->dev, "%s (line %d)\n", __func__, line); if (test_bit(FLAG_START, &lp->flags)) disable_irq_nosync(lp->spi->irq); adf7242_cmd(lp, CMD_RC_PC_RESET_NO_WAIT); usleep_range(200, 250); adf7242_write_reg(lp, REG_PKT_CFG, ADDON_EN | BIT(2)); adf7242_cmd(lp, CMD_RC_PHY_RDY); adf7242_set_promiscuous_mode(lp->hw, lp->promiscuous); adf7242_set_csma_params(lp->hw, lp->min_be, lp->max_be, lp->max_cca_retries); adf7242_clear_irqstat(lp); if (test_bit(FLAG_START, &lp->flags)) { enable_irq(lp->spi->irq); return adf7242_cmd(lp, CMD_RC_RX); } return 0; } static int adf7242_hw_init(struct adf7242_local *lp) { int ret; const struct firmware *fw; adf7242_cmd(lp, CMD_RC_RESET); adf7242_cmd(lp, CMD_RC_IDLE); /* get ADF7242 addon firmware * build this driver as module * and place under /lib/firmware/adf7242_firmware.bin * or compile firmware into the kernel. 
*/ ret = request_firmware(&fw, FIRMWARE, &lp->spi->dev); if (ret) { dev_err(&lp->spi->dev, "request_firmware() failed with %d\n", ret); return ret; } ret = adf7242_upload_firmware(lp, (u8 *)fw->data, fw->size); if (ret) { dev_err(&lp->spi->dev, "upload firmware failed with %d\n", ret); release_firmware(fw); return ret; } ret = adf7242_verify_firmware(lp, (u8 *)fw->data, fw->size); if (ret) { dev_err(&lp->spi->dev, "verify firmware failed with %d\n", ret); release_firmware(fw); return ret; } adf7242_cmd(lp, CMD_RC_PC_RESET); release_firmware(fw); adf7242_write_reg(lp, REG_FFILT_CFG, ACCEPT_BEACON_FRAMES | ACCEPT_DATA_FRAMES | ACCEPT_MACCMD_FRAMES | ACCEPT_RESERVED_FRAMES); adf7242_write_reg(lp, REG_AUTO_CFG, RX_AUTO_ACK_EN); adf7242_write_reg(lp, REG_PKT_CFG, ADDON_EN | BIT(2)); adf7242_write_reg(lp, REG_EXTPA_MSC, 0xF1); adf7242_write_reg(lp, REG_RXFE_CFG, 0x1D); adf7242_write_reg(lp, REG_IRQ1_EN0, 0); adf7242_write_reg(lp, REG_IRQ1_EN1, IRQ_RX_PKT_RCVD | IRQ_CSMA_CA); adf7242_clear_irqstat(lp); adf7242_write_reg(lp, REG_IRQ1_SRC0, 0xFF); adf7242_cmd(lp, CMD_RC_IDLE); return 0; } static int adf7242_stats_show(struct seq_file *file, void *offset) { struct adf7242_local *lp = spi_get_drvdata(file->private); u8 stat, irq1; adf7242_status(lp, &stat); adf7242_read_reg(lp, REG_IRQ1_SRC1, &irq1); seq_printf(file, "IRQ1 = %X:\n%s%s%s%s%s%s%s%s\n", irq1, irq1 & IRQ_CCA_COMPLETE ? "IRQ_CCA_COMPLETE\n" : "", irq1 & IRQ_SFD_RX ? "IRQ_SFD_RX\n" : "", irq1 & IRQ_SFD_TX ? "IRQ_SFD_TX\n" : "", irq1 & IRQ_RX_PKT_RCVD ? "IRQ_RX_PKT_RCVD\n" : "", irq1 & IRQ_TX_PKT_SENT ? "IRQ_TX_PKT_SENT\n" : "", irq1 & IRQ_CSMA_CA ? "IRQ_CSMA_CA\n" : "", irq1 & IRQ_FRAME_VALID ? "IRQ_FRAME_VALID\n" : "", irq1 & IRQ_ADDRESS_VALID ? "IRQ_ADDRESS_VALID\n" : ""); seq_printf(file, "STATUS = %X:\n%s\n%s\n%s\n%s\n%s%s%s%s%s\n", stat, stat & STAT_SPI_READY ? "SPI_READY" : "SPI_BUSY", stat & STAT_IRQ_STATUS ? "IRQ_PENDING" : "IRQ_CLEAR", stat & STAT_RC_READY ? "RC_READY" : "RC_BUSY", stat & STAT_CCA_RESULT ? "CHAN_IDLE" : "CHAN_BUSY", (stat & 0xf) == RC_STATUS_IDLE ? "RC_STATUS_IDLE" : "", (stat & 0xf) == RC_STATUS_MEAS ? "RC_STATUS_MEAS" : "", (stat & 0xf) == RC_STATUS_PHY_RDY ? "RC_STATUS_PHY_RDY" : "", (stat & 0xf) == RC_STATUS_RX ? "RC_STATUS_RX" : "", (stat & 0xf) == RC_STATUS_TX ? 
"RC_STATUS_TX" : ""); seq_printf(file, "RSSI = %d\n", lp->rssi); return 0; } static void adf7242_debugfs_init(struct adf7242_local *lp) { char debugfs_dir_name[DNAME_INLINE_LEN + 1] = "adf7242-"; strncat(debugfs_dir_name, dev_name(&lp->spi->dev), DNAME_INLINE_LEN); lp->debugfs_root = debugfs_create_dir(debugfs_dir_name, NULL); debugfs_create_devm_seqfile(&lp->spi->dev, "status", lp->debugfs_root, adf7242_stats_show); } static const s32 adf7242_powers[] = { 500, 400, 300, 200, 100, 0, -100, -200, -300, -400, -500, -600, -700, -800, -900, -1000, -1100, -1200, -1300, -1400, -1500, -1600, -1700, -1800, -1900, -2000, -2100, -2200, -2300, -2400, -2500, -2600, }; static const s32 adf7242_ed_levels[] = { -9000, -8900, -8800, -8700, -8600, -8500, -8400, -8300, -8200, -8100, -8000, -7900, -7800, -7700, -7600, -7500, -7400, -7300, -7200, -7100, -7000, -6900, -6800, -6700, -6600, -6500, -6400, -6300, -6200, -6100, -6000, -5900, -5800, -5700, -5600, -5500, -5400, -5300, -5200, -5100, -5000, -4900, -4800, -4700, -4600, -4500, -4400, -4300, -4200, -4100, -4000, -3900, -3800, -3700, -3600, -3500, -3400, -3200, -3100, -3000 }; static int adf7242_probe(struct spi_device *spi) { struct ieee802154_hw *hw; struct adf7242_local *lp; int ret, irq_type; if (!spi->irq) { dev_err(&spi->dev, "no IRQ specified\n"); return -EINVAL; } hw = ieee802154_alloc_hw(sizeof(*lp), &adf7242_ops); if (!hw) return -ENOMEM; lp = hw->priv; lp->hw = hw; lp->spi = spi; hw->priv = lp; hw->parent = &spi->dev; hw->extra_tx_headroom = 0; /* We support only 2.4 Ghz */ hw->phy->supported.channels[0] = 0x7FFF800; hw->flags = IEEE802154_HW_OMIT_CKSUM | IEEE802154_HW_CSMA_PARAMS | IEEE802154_HW_FRAME_RETRIES | IEEE802154_HW_AFILT | IEEE802154_HW_PROMISCUOUS; hw->phy->flags = WPAN_PHY_FLAG_TXPOWER | WPAN_PHY_FLAG_CCA_ED_LEVEL | WPAN_PHY_FLAG_CCA_MODE; hw->phy->supported.cca_modes = BIT(NL802154_CCA_ENERGY); hw->phy->supported.cca_ed_levels = adf7242_ed_levels; hw->phy->supported.cca_ed_levels_size = ARRAY_SIZE(adf7242_ed_levels); hw->phy->cca.mode = NL802154_CCA_ENERGY; hw->phy->supported.tx_powers = adf7242_powers; hw->phy->supported.tx_powers_size = ARRAY_SIZE(adf7242_powers); hw->phy->supported.min_minbe = 0; hw->phy->supported.max_minbe = 8; hw->phy->supported.min_maxbe = 3; hw->phy->supported.max_maxbe = 8; hw->phy->supported.min_frame_retries = 0; hw->phy->supported.max_frame_retries = 15; hw->phy->supported.min_csma_backoffs = 0; hw->phy->supported.max_csma_backoffs = 5; ieee802154_random_extended_addr(&hw->phy->perm_extended_addr); mutex_init(&lp->bmux); init_completion(&lp->tx_complete); /* Setup Status Message */ lp->stat_xfer.len = 1; lp->stat_xfer.tx_buf = &lp->buf_stat_tx; lp->stat_xfer.rx_buf = &lp->buf_stat_rx; lp->buf_stat_tx = CMD_SPI_NOP; spi_message_init(&lp->stat_msg); spi_message_add_tail(&lp->stat_xfer, &lp->stat_msg); spi_set_drvdata(spi, lp); INIT_DELAYED_WORK(&lp->work, adf7242_rx_cal_work); lp->wqueue = alloc_ordered_workqueue(dev_name(&spi->dev), WQ_MEM_RECLAIM); if (unlikely(!lp->wqueue)) { ret = -ENOMEM; goto err_alloc_wq; } ret = adf7242_hw_init(lp); if (ret) goto err_hw_init; irq_type = irq_get_trigger_type(spi->irq); if (!irq_type) irq_type = IRQF_TRIGGER_HIGH; ret = devm_request_threaded_irq(&spi->dev, spi->irq, NULL, adf7242_isr, irq_type | IRQF_ONESHOT, dev_name(&spi->dev), lp); if (ret) goto err_hw_init; disable_irq(spi->irq); ret = ieee802154_register_hw(lp->hw); if (ret) goto err_hw_init; dev_set_drvdata(&spi->dev, lp); adf7242_debugfs_init(lp); dev_info(&spi->dev, "mac802154 IRQ-%d registered\n", 
		 spi->irq);

	return ret;

err_hw_init:
	destroy_workqueue(lp->wqueue);

err_alloc_wq:
	mutex_destroy(&lp->bmux);
	ieee802154_free_hw(lp->hw);

	return ret;
}

static void adf7242_remove(struct spi_device *spi)
{
	struct adf7242_local *lp = spi_get_drvdata(spi);

	debugfs_remove_recursive(lp->debugfs_root);
	ieee802154_unregister_hw(lp->hw);
	cancel_delayed_work_sync(&lp->work);
	destroy_workqueue(lp->wqueue);
	mutex_destroy(&lp->bmux);
	ieee802154_free_hw(lp->hw);
}

static const struct of_device_id adf7242_of_match[] = {
	{ .compatible = "adi,adf7242", },
	{ .compatible = "adi,adf7241", },
	{ },
};
MODULE_DEVICE_TABLE(of, adf7242_of_match);

static const struct spi_device_id adf7242_device_id[] = {
	{ .name = "adf7242", },
	{ .name = "adf7241", },
	{ },
};
MODULE_DEVICE_TABLE(spi, adf7242_device_id);

static struct spi_driver adf7242_driver = {
	.id_table = adf7242_device_id,
	.driver = {
		.of_match_table = adf7242_of_match,
		.name = "adf7242",
	},
	.probe = adf7242_probe,
	.remove = adf7242_remove,
};
module_spi_driver(adf7242_driver);

MODULE_AUTHOR("Michael Hennerich <[email protected]>");
MODULE_DESCRIPTION("ADF7242 IEEE802.15.4 Transceiver Driver");
MODULE_LICENSE("GPL");
MODULE_FIRMWARE(FIRMWARE);
linux-master
drivers/net/ieee802154/adf7242.c
/* * http://www.cascoda.com/products/ca-821x/ * Copyright (c) 2016, Cascoda, Ltd. * All rights reserved. * * This code is dual-licensed under both GPLv2 and 3-clause BSD. What follows is * the license notice for both respectively. * ******************************************************************************* * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version 2 * of the License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * ******************************************************************************* * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its contributors * may be used to endorse or promote products derived from this software without * specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. 
*/ #include <linux/cdev.h> #include <linux/clk-provider.h> #include <linux/debugfs.h> #include <linux/delay.h> #include <linux/gpio/consumer.h> #include <linux/gpio.h> #include <linux/ieee802154.h> #include <linux/io.h> #include <linux/kfifo.h> #include <linux/of.h> #include <linux/of_gpio.h> #include <linux/module.h> #include <linux/mutex.h> #include <linux/poll.h> #include <linux/skbuff.h> #include <linux/slab.h> #include <linux/spi/spi.h> #include <linux/spinlock.h> #include <linux/string.h> #include <linux/workqueue.h> #include <linux/interrupt.h> #include <net/ieee802154_netdev.h> #include <net/mac802154.h> #define DRIVER_NAME "ca8210" /* external clock frequencies */ #define ONE_MHZ 1000000 #define TWO_MHZ (2 * ONE_MHZ) #define FOUR_MHZ (4 * ONE_MHZ) #define EIGHT_MHZ (8 * ONE_MHZ) #define SIXTEEN_MHZ (16 * ONE_MHZ) /* spi constants */ #define CA8210_SPI_BUF_SIZE 256 #define CA8210_SYNC_TIMEOUT 1000 /* Timeout for synchronous commands [ms] */ /* test interface constants */ #define CA8210_TEST_INT_FILE_NAME "ca8210_test" #define CA8210_TEST_INT_FIFO_SIZE 256 /* HWME attribute IDs */ #define HWME_EDTHRESHOLD (0x04) #define HWME_EDVALUE (0x06) #define HWME_SYSCLKOUT (0x0F) #define HWME_LQILIMIT (0x11) /* TDME attribute IDs */ #define TDME_CHANNEL (0x00) #define TDME_ATM_CONFIG (0x06) #define MAX_HWME_ATTRIBUTE_SIZE 16 #define MAX_TDME_ATTRIBUTE_SIZE 2 /* PHY/MAC PIB Attribute Enumerations */ #define PHY_CURRENT_CHANNEL (0x00) #define PHY_TRANSMIT_POWER (0x02) #define PHY_CCA_MODE (0x03) #define MAC_ASSOCIATION_PERMIT (0x41) #define MAC_AUTO_REQUEST (0x42) #define MAC_BATT_LIFE_EXT (0x43) #define MAC_BATT_LIFE_EXT_PERIODS (0x44) #define MAC_BEACON_PAYLOAD (0x45) #define MAC_BEACON_PAYLOAD_LENGTH (0x46) #define MAC_BEACON_ORDER (0x47) #define MAC_GTS_PERMIT (0x4d) #define MAC_MAX_CSMA_BACKOFFS (0x4e) #define MAC_MIN_BE (0x4f) #define MAC_PAN_ID (0x50) #define MAC_PROMISCUOUS_MODE (0x51) #define MAC_RX_ON_WHEN_IDLE (0x52) #define MAC_SHORT_ADDRESS (0x53) #define MAC_SUPERFRAME_ORDER (0x54) #define MAC_ASSOCIATED_PAN_COORD (0x56) #define MAC_MAX_BE (0x57) #define MAC_MAX_FRAME_RETRIES (0x59) #define MAC_RESPONSE_WAIT_TIME (0x5A) #define MAC_SECURITY_ENABLED (0x5D) #define MAC_AUTO_REQUEST_SECURITY_LEVEL (0x78) #define MAC_AUTO_REQUEST_KEY_ID_MODE (0x79) #define NS_IEEE_ADDRESS (0xFF) /* Non-standard IEEE address */ /* MAC Address Mode Definitions */ #define MAC_MODE_NO_ADDR (0x00) #define MAC_MODE_SHORT_ADDR (0x02) #define MAC_MODE_LONG_ADDR (0x03) /* MAC constants */ #define MAX_BEACON_OVERHEAD (75) #define MAX_BEACON_PAYLOAD_LENGTH (IEEE802154_MTU - MAX_BEACON_OVERHEAD) #define MAX_ATTRIBUTE_SIZE (122) #define MAX_DATA_SIZE (114) #define CA8210_VALID_CHANNELS (0x07FFF800) /* MAC workarounds for V1.1 and MPW silicon (V0.x) */ #define CA8210_MAC_WORKAROUNDS (0) #define CA8210_MAC_MPW (0) /* memory manipulation macros */ #define LS_BYTE(x) ((u8)((x) & 0xFF)) #define MS_BYTE(x) ((u8)(((x) >> 8) & 0xFF)) /* message ID codes in SPI commands */ /* downstream */ #define MCPS_DATA_REQUEST (0x00) #define MLME_ASSOCIATE_REQUEST (0x02) #define MLME_ASSOCIATE_RESPONSE (0x03) #define MLME_DISASSOCIATE_REQUEST (0x04) #define MLME_GET_REQUEST (0x05) #define MLME_ORPHAN_RESPONSE (0x06) #define MLME_RESET_REQUEST (0x07) #define MLME_RX_ENABLE_REQUEST (0x08) #define MLME_SCAN_REQUEST (0x09) #define MLME_SET_REQUEST (0x0A) #define MLME_START_REQUEST (0x0B) #define MLME_POLL_REQUEST (0x0D) #define HWME_SET_REQUEST (0x0E) #define HWME_GET_REQUEST (0x0F) #define TDME_SETSFR_REQUEST (0x11) #define 
TDME_GETSFR_REQUEST (0x12) #define TDME_SET_REQUEST (0x14) /* upstream */ #define MCPS_DATA_INDICATION (0x00) #define MCPS_DATA_CONFIRM (0x01) #define MLME_RESET_CONFIRM (0x0A) #define MLME_SET_CONFIRM (0x0E) #define MLME_START_CONFIRM (0x0F) #define HWME_SET_CONFIRM (0x12) #define HWME_GET_CONFIRM (0x13) #define HWME_WAKEUP_INDICATION (0x15) #define TDME_SETSFR_CONFIRM (0x17) /* SPI command IDs */ /* bit indicating a confirm or indication from slave to master */ #define SPI_S2M (0x20) /* bit indicating a synchronous message */ #define SPI_SYN (0x40) /* SPI command definitions */ #define SPI_IDLE (0xFF) #define SPI_NACK (0xF0) #define SPI_MCPS_DATA_REQUEST (MCPS_DATA_REQUEST) #define SPI_MCPS_DATA_INDICATION (MCPS_DATA_INDICATION + SPI_S2M) #define SPI_MCPS_DATA_CONFIRM (MCPS_DATA_CONFIRM + SPI_S2M) #define SPI_MLME_ASSOCIATE_REQUEST (MLME_ASSOCIATE_REQUEST) #define SPI_MLME_RESET_REQUEST (MLME_RESET_REQUEST + SPI_SYN) #define SPI_MLME_SET_REQUEST (MLME_SET_REQUEST + SPI_SYN) #define SPI_MLME_START_REQUEST (MLME_START_REQUEST + SPI_SYN) #define SPI_MLME_RESET_CONFIRM (MLME_RESET_CONFIRM + SPI_S2M + SPI_SYN) #define SPI_MLME_SET_CONFIRM (MLME_SET_CONFIRM + SPI_S2M + SPI_SYN) #define SPI_MLME_START_CONFIRM (MLME_START_CONFIRM + SPI_S2M + SPI_SYN) #define SPI_HWME_SET_REQUEST (HWME_SET_REQUEST + SPI_SYN) #define SPI_HWME_GET_REQUEST (HWME_GET_REQUEST + SPI_SYN) #define SPI_HWME_SET_CONFIRM (HWME_SET_CONFIRM + SPI_S2M + SPI_SYN) #define SPI_HWME_GET_CONFIRM (HWME_GET_CONFIRM + SPI_S2M + SPI_SYN) #define SPI_HWME_WAKEUP_INDICATION (HWME_WAKEUP_INDICATION + SPI_S2M) #define SPI_TDME_SETSFR_REQUEST (TDME_SETSFR_REQUEST + SPI_SYN) #define SPI_TDME_SET_REQUEST (TDME_SET_REQUEST + SPI_SYN) #define SPI_TDME_SETSFR_CONFIRM (TDME_SETSFR_CONFIRM + SPI_S2M + SPI_SYN) /* TDME SFR addresses */ /* Page 0 */ #define CA8210_SFR_PACFG (0xB1) #define CA8210_SFR_MACCON (0xD8) #define CA8210_SFR_PACFGIB (0xFE) /* Page 1 */ #define CA8210_SFR_LOTXCAL (0xBF) #define CA8210_SFR_PTHRH (0xD1) #define CA8210_SFR_PRECFG (0xD3) #define CA8210_SFR_LNAGX40 (0xE1) #define CA8210_SFR_LNAGX41 (0xE2) #define CA8210_SFR_LNAGX42 (0xE3) #define CA8210_SFR_LNAGX43 (0xE4) #define CA8210_SFR_LNAGX44 (0xE5) #define CA8210_SFR_LNAGX45 (0xE6) #define CA8210_SFR_LNAGX46 (0xE7) #define CA8210_SFR_LNAGX47 (0xE9) #define PACFGIB_DEFAULT_CURRENT (0x3F) #define PTHRH_DEFAULT_THRESHOLD (0x5A) #define LNAGX40_DEFAULT_GAIN (0x29) /* 10dB */ #define LNAGX41_DEFAULT_GAIN (0x54) /* 21dB */ #define LNAGX42_DEFAULT_GAIN (0x6C) /* 27dB */ #define LNAGX43_DEFAULT_GAIN (0x7A) /* 30dB */ #define LNAGX44_DEFAULT_GAIN (0x84) /* 33dB */ #define LNAGX45_DEFAULT_GAIN (0x8B) /* 34dB */ #define LNAGX46_DEFAULT_GAIN (0x92) /* 36dB */ #define LNAGX47_DEFAULT_GAIN (0x96) /* 37dB */ #define CA8210_IOCTL_HARD_RESET (0x00) /* Structs/Enums */ /** * struct cas_control - spi transfer structure * @msg: spi_message for each exchange * @transfer: spi_transfer for each exchange * @tx_buf: source array for transmission * @tx_in_buf: array storing bytes received during transmission * @priv: pointer to private data * * This structure stores all the necessary data passed around during a single * spi exchange. 
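 *
 * Editor's note (added annotation, not in the original kernel-doc): SPI is
 * full duplex, so while @tx_buf is clocked out to the ca8210 the bytes the
 * device returns during the same transfer are captured in @tx_in_buf.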
*/ struct cas_control { struct spi_message msg; struct spi_transfer transfer; u8 tx_buf[CA8210_SPI_BUF_SIZE]; u8 tx_in_buf[CA8210_SPI_BUF_SIZE]; struct ca8210_priv *priv; }; /** * struct ca8210_test - ca8210 test interface structure * @ca8210_dfs_spi_int: pointer to the entry in the debug fs for this device * @up_fifo: fifo for upstream messages * @readq: read wait queue * * This structure stores all the data pertaining to the debug interface */ struct ca8210_test { struct dentry *ca8210_dfs_spi_int; struct kfifo up_fifo; wait_queue_head_t readq; }; /** * struct ca8210_priv - ca8210 private data structure * @spi: pointer to the ca8210 spi device object * @hw: pointer to the ca8210 ieee802154_hw object * @hw_registered: true if hw has been registered with ieee802154 * @lock: spinlock protecting the private data area * @mlme_workqueue: workqueue for triggering MLME Reset * @irq_workqueue: workqueue for irq processing * @tx_skb: current socket buffer to transmit * @nextmsduhandle: msdu handle to pass to the 15.4 MAC layer for the * next transmission * @clk: external clock provided by the ca8210 * @last_dsn: sequence number of last data packet received, for * resend detection * @test: test interface data section for this instance * @async_tx_pending: true if an asynchronous transmission was started and * is not complete * @sync_command_response: pointer to buffer to fill with sync response * @ca8210_is_awake: nonzero if ca8210 is initialised, ready for comms * @sync_down: counts number of downstream synchronous commands * @sync_up: counts number of upstream synchronous commands * @spi_transfer_complete: completion object for a single spi_transfer * @sync_exchange_complete: completion object for a complete synchronous API * exchange * @promiscuous: whether the ca8210 is in promiscuous mode or not * @retries: records how many times the current pending spi * transfer has been retried */ struct ca8210_priv { struct spi_device *spi; struct ieee802154_hw *hw; bool hw_registered; spinlock_t lock; struct workqueue_struct *mlme_workqueue; struct workqueue_struct *irq_workqueue; struct sk_buff *tx_skb; u8 nextmsduhandle; struct clk *clk; int last_dsn; struct ca8210_test test; bool async_tx_pending; u8 *sync_command_response; struct completion ca8210_is_awake; int sync_down, sync_up; struct completion spi_transfer_complete, sync_exchange_complete; bool promiscuous; int retries; }; /** * struct work_priv_container - link between a work object and the relevant * device's private data * @work: work object being executed * @priv: device's private data section * */ struct work_priv_container { struct work_struct work; struct ca8210_priv *priv; }; /** * struct ca8210_platform_data - ca8210 platform data structure * @extclockenable: true if the external clock is to be enabled * @extclockfreq: frequency of the external clock * @extclockgpio: ca8210 output gpio of the external clock * @gpio_reset: gpio number of ca8210 reset line * @gpio_irq: gpio number of ca8210 interrupt line * @irq_id: identifier for the ca8210 irq * */ struct ca8210_platform_data { bool extclockenable; unsigned int extclockfreq; unsigned int extclockgpio; int gpio_reset; int gpio_irq; int irq_id; }; /** * struct fulladdr - full MAC addressing information structure * @mode: address mode (none, short, extended) * @pan_id: 16-bit LE pan id * @address: LE address, variable length as specified by mode * */ struct fulladdr { u8 mode; u8 pan_id[2]; u8 address[8]; }; /** * union macaddr: generic MAC address container * @short_address: 16-bit short 
address * @ieee_address: 64-bit extended address as LE byte array * */ union macaddr { u16 short_address; u8 ieee_address[8]; }; /** * struct secspec: security specification for SAP commands * @security_level: 0-7, controls level of authentication & encryption * @key_id_mode: 0-3, specifies how to obtain key * @key_source: extended key retrieval data * @key_index: single-byte key identifier * */ struct secspec { u8 security_level; u8 key_id_mode; u8 key_source[8]; u8 key_index; }; /* downlink functions parameter set definitions */ struct mcps_data_request_pset { u8 src_addr_mode; struct fulladdr dst; u8 msdu_length; u8 msdu_handle; u8 tx_options; u8 msdu[MAX_DATA_SIZE]; }; struct mlme_set_request_pset { u8 pib_attribute; u8 pib_attribute_index; u8 pib_attribute_length; u8 pib_attribute_value[MAX_ATTRIBUTE_SIZE]; }; struct hwme_set_request_pset { u8 hw_attribute; u8 hw_attribute_length; u8 hw_attribute_value[MAX_HWME_ATTRIBUTE_SIZE]; }; struct hwme_get_request_pset { u8 hw_attribute; }; struct tdme_setsfr_request_pset { u8 sfr_page; u8 sfr_address; u8 sfr_value; }; /* uplink functions parameter set definitions */ struct hwme_set_confirm_pset { u8 status; u8 hw_attribute; }; struct hwme_get_confirm_pset { u8 status; u8 hw_attribute; u8 hw_attribute_length; u8 hw_attribute_value[MAX_HWME_ATTRIBUTE_SIZE]; }; struct tdme_setsfr_confirm_pset { u8 status; u8 sfr_page; u8 sfr_address; }; struct mac_message { u8 command_id; u8 length; union { struct mcps_data_request_pset data_req; struct mlme_set_request_pset set_req; struct hwme_set_request_pset hwme_set_req; struct hwme_get_request_pset hwme_get_req; struct tdme_setsfr_request_pset tdme_set_sfr_req; struct hwme_set_confirm_pset hwme_set_cnf; struct hwme_get_confirm_pset hwme_get_cnf; struct tdme_setsfr_confirm_pset tdme_set_sfr_cnf; u8 u8param; u8 status; u8 payload[148]; } pdata; }; union pa_cfg_sfr { struct { u8 bias_current_trim : 3; u8 /* reserved */ : 1; u8 buffer_capacitor_trim : 3; u8 boost : 1; }; u8 paib; }; struct preamble_cfg_sfr { u8 timeout_symbols : 3; u8 acquisition_symbols : 3; u8 search_symbols : 2; }; static int (*cascoda_api_upstream)( const u8 *buf, size_t len, void *device_ref ); /** * link_to_linux_err() - Translates an 802.15.4 return code into the closest * linux error * @link_status: 802.15.4 status code * * Return: 0 or Linux error code */ static int link_to_linux_err(int link_status) { if (link_status < 0) { /* status is already a Linux code */ return link_status; } switch (link_status) { case IEEE802154_SUCCESS: case IEEE802154_REALIGNMENT: return 0; case IEEE802154_IMPROPER_KEY_TYPE: return -EKEYREJECTED; case IEEE802154_IMPROPER_SECURITY_LEVEL: case IEEE802154_UNSUPPORTED_LEGACY: case IEEE802154_DENIED: return -EACCES; case IEEE802154_BEACON_LOST: case IEEE802154_NO_ACK: case IEEE802154_NO_BEACON: return -ENETUNREACH; case IEEE802154_CHANNEL_ACCESS_FAILURE: case IEEE802154_TX_ACTIVE: case IEEE802154_SCAN_IN_PROGRESS: return -EBUSY; case IEEE802154_DISABLE_TRX_FAILURE: case IEEE802154_OUT_OF_CAP: return -EAGAIN; case IEEE802154_FRAME_TOO_LONG: return -EMSGSIZE; case IEEE802154_INVALID_GTS: case IEEE802154_PAST_TIME: return -EBADSLT; case IEEE802154_INVALID_HANDLE: return -EBADMSG; case IEEE802154_INVALID_PARAMETER: case IEEE802154_UNSUPPORTED_ATTRIBUTE: case IEEE802154_ON_TIME_TOO_LONG: case IEEE802154_INVALID_INDEX: return -EINVAL; case IEEE802154_NO_DATA: return -ENODATA; case IEEE802154_NO_SHORT_ADDRESS: return -EFAULT; case IEEE802154_PAN_ID_CONFLICT: return -EADDRINUSE; case IEEE802154_TRANSACTION_EXPIRED: 
return -ETIME; case IEEE802154_TRANSACTION_OVERFLOW: return -ENOBUFS; case IEEE802154_UNAVAILABLE_KEY: return -ENOKEY; case IEEE802154_INVALID_ADDRESS: return -ENXIO; case IEEE802154_TRACKING_OFF: case IEEE802154_SUPERFRAME_OVERLAP: return -EREMOTEIO; case IEEE802154_LIMIT_REACHED: return -EDQUOT; case IEEE802154_READ_ONLY: return -EROFS; default: return -EPROTO; } } /** * ca8210_test_int_driver_write() - Writes a message to the test interface to be * read by the userspace * @buf: Buffer containing upstream message * @len: length of message to write * @spi: SPI device of message originator * * Return: 0 or linux error code */ static int ca8210_test_int_driver_write( const u8 *buf, size_t len, void *spi ) { struct ca8210_priv *priv = spi_get_drvdata(spi); struct ca8210_test *test = &priv->test; char *fifo_buffer; int i; dev_dbg( &priv->spi->dev, "test_interface: Buffering upstream message:\n" ); for (i = 0; i < len; i++) dev_dbg(&priv->spi->dev, "%#03x\n", buf[i]); fifo_buffer = kmemdup(buf, len, GFP_KERNEL); if (!fifo_buffer) return -ENOMEM; kfifo_in(&test->up_fifo, &fifo_buffer, 4); wake_up_interruptible(&priv->test.readq); return 0; } /* SPI Operation */ static int ca8210_net_rx( struct ieee802154_hw *hw, u8 *command, size_t len ); static u8 mlme_reset_request_sync( u8 set_default_pib, void *device_ref ); static int ca8210_spi_transfer( struct spi_device *spi, const u8 *buf, size_t len ); /** * ca8210_reset_send() - Hard resets the ca8210 for a given time * @spi: Pointer to target ca8210 spi device * @ms: Milliseconds to hold the reset line low for */ static void ca8210_reset_send(struct spi_device *spi, unsigned int ms) { struct ca8210_platform_data *pdata = spi->dev.platform_data; struct ca8210_priv *priv = spi_get_drvdata(spi); long status; gpio_set_value(pdata->gpio_reset, 0); reinit_completion(&priv->ca8210_is_awake); msleep(ms); gpio_set_value(pdata->gpio_reset, 1); priv->promiscuous = false; /* Wait until wakeup indication seen */ status = wait_for_completion_interruptible_timeout( &priv->ca8210_is_awake, msecs_to_jiffies(CA8210_SYNC_TIMEOUT) ); if (status == 0) { dev_crit( &spi->dev, "Fatal: No wakeup from ca8210 after reset!\n" ); } dev_dbg(&spi->dev, "Reset the device\n"); } /** * ca8210_mlme_reset_worker() - Resets the MLME, Called when the MAC OVERFLOW * condition happens. * @work: Pointer to work being executed */ static void ca8210_mlme_reset_worker(struct work_struct *work) { struct work_priv_container *wpc = container_of( work, struct work_priv_container, work ); struct ca8210_priv *priv = wpc->priv; mlme_reset_request_sync(0, priv->spi); kfree(wpc); } /** * ca8210_rx_done() - Calls various message dispatches responding to a received * command * @cas_ctl: Pointer to the cas_control object for the relevant spi transfer * * Presents a received SAP command from the ca8210 to the Cascoda EVBME, test * interface and network driver. 
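 * The frame length is taken from the length octet at buf[1]; the complete
 * frame spans buf[1] + 2 bytes including the command ID and length octets.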
*/ static void ca8210_rx_done(struct cas_control *cas_ctl) { u8 *buf; unsigned int len; struct work_priv_container *mlme_reset_wpc; struct ca8210_priv *priv = cas_ctl->priv; buf = cas_ctl->tx_in_buf; len = buf[1] + 2; if (len > CA8210_SPI_BUF_SIZE) { dev_crit( &priv->spi->dev, "Received packet len (%u) erroneously long\n", len ); goto finish; } if (buf[0] & SPI_SYN) { if (priv->sync_command_response) { memcpy(priv->sync_command_response, buf, len); complete(&priv->sync_exchange_complete); } else { if (cascoda_api_upstream) cascoda_api_upstream(buf, len, priv->spi); priv->sync_up++; } } else { if (cascoda_api_upstream) cascoda_api_upstream(buf, len, priv->spi); } ca8210_net_rx(priv->hw, buf, len); if (buf[0] == SPI_MCPS_DATA_CONFIRM) { if (buf[3] == IEEE802154_TRANSACTION_OVERFLOW) { dev_info( &priv->spi->dev, "Waiting for transaction overflow to stabilise...\n"); msleep(2000); dev_info( &priv->spi->dev, "Resetting MAC...\n"); mlme_reset_wpc = kmalloc(sizeof(*mlme_reset_wpc), GFP_KERNEL); if (!mlme_reset_wpc) goto finish; INIT_WORK( &mlme_reset_wpc->work, ca8210_mlme_reset_worker ); mlme_reset_wpc->priv = priv; queue_work(priv->mlme_workqueue, &mlme_reset_wpc->work); } } else if (buf[0] == SPI_HWME_WAKEUP_INDICATION) { dev_notice( &priv->spi->dev, "Wakeup indication received, reason:\n" ); switch (buf[2]) { case 0: dev_notice( &priv->spi->dev, "Transceiver woken up from Power Up / System Reset\n" ); break; case 1: dev_notice( &priv->spi->dev, "Watchdog Timer Time-Out\n" ); break; case 2: dev_notice( &priv->spi->dev, "Transceiver woken up from Power-Off by Sleep Timer Time-Out\n"); break; case 3: dev_notice( &priv->spi->dev, "Transceiver woken up from Power-Off by GPIO Activity\n" ); break; case 4: dev_notice( &priv->spi->dev, "Transceiver woken up from Standby by Sleep Timer Time-Out\n" ); break; case 5: dev_notice( &priv->spi->dev, "Transceiver woken up from Standby by GPIO Activity\n" ); break; case 6: dev_notice( &priv->spi->dev, "Sleep-Timer Time-Out in Active Mode\n" ); break; default: dev_warn(&priv->spi->dev, "Wakeup reason unknown\n"); break; } complete(&priv->ca8210_is_awake); } finish:; } static void ca8210_remove(struct spi_device *spi_device); /** * ca8210_spi_transfer_complete() - Called when a single spi transfer has * completed * @context: Pointer to the cas_control object for the finished transfer */ static void ca8210_spi_transfer_complete(void *context) { struct cas_control *cas_ctl = context; struct ca8210_priv *priv = cas_ctl->priv; bool duplex_rx = false; int i; u8 retry_buffer[CA8210_SPI_BUF_SIZE]; if ( cas_ctl->tx_in_buf[0] == SPI_NACK || (cas_ctl->tx_in_buf[0] == SPI_IDLE && cas_ctl->tx_in_buf[1] == SPI_NACK) ) { /* ca8210 is busy */ dev_info(&priv->spi->dev, "ca8210 was busy during attempted write\n"); if (cas_ctl->tx_buf[0] == SPI_IDLE) { dev_warn( &priv->spi->dev, "IRQ servicing NACKd, dropping transfer\n" ); kfree(cas_ctl); return; } if (priv->retries > 3) { dev_err(&priv->spi->dev, "too many retries!\n"); kfree(cas_ctl); ca8210_remove(priv->spi); return; } memcpy(retry_buffer, cas_ctl->tx_buf, CA8210_SPI_BUF_SIZE); kfree(cas_ctl); ca8210_spi_transfer( priv->spi, retry_buffer, CA8210_SPI_BUF_SIZE ); priv->retries++; dev_info(&priv->spi->dev, "retried spi write\n"); return; } else if ( cas_ctl->tx_in_buf[0] != SPI_IDLE && cas_ctl->tx_in_buf[0] != SPI_NACK ) { duplex_rx = true; } if (duplex_rx) { dev_dbg(&priv->spi->dev, "READ CMD DURING TX\n"); for (i = 0; i < cas_ctl->tx_in_buf[1] + 2; i++) dev_dbg( &priv->spi->dev, "%#03x\n", cas_ctl->tx_in_buf[i] ); 
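		/* The first received byte was neither SPI_IDLE nor SPI_NACK, so
		 * the ca8210 clocked out an upstream frame during our write;
		 * feed it through the normal receive path.
		 */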
ca8210_rx_done(cas_ctl); } complete(&priv->spi_transfer_complete); kfree(cas_ctl); priv->retries = 0; } /** * ca8210_spi_transfer() - Initiate duplex spi transfer with ca8210 * @spi: Pointer to spi device for transfer * @buf: Octet array to send * @len: length of the buffer being sent * * Return: 0 or linux error code */ static int ca8210_spi_transfer( struct spi_device *spi, const u8 *buf, size_t len ) { int i, status = 0; struct ca8210_priv *priv; struct cas_control *cas_ctl; if (!spi) { pr_crit("NULL spi device passed to %s\n", __func__); return -ENODEV; } priv = spi_get_drvdata(spi); reinit_completion(&priv->spi_transfer_complete); dev_dbg(&spi->dev, "%s called\n", __func__); cas_ctl = kzalloc(sizeof(*cas_ctl), GFP_ATOMIC); if (!cas_ctl) return -ENOMEM; cas_ctl->priv = priv; memset(cas_ctl->tx_buf, SPI_IDLE, CA8210_SPI_BUF_SIZE); memset(cas_ctl->tx_in_buf, SPI_IDLE, CA8210_SPI_BUF_SIZE); memcpy(cas_ctl->tx_buf, buf, len); for (i = 0; i < len; i++) dev_dbg(&spi->dev, "%#03x\n", cas_ctl->tx_buf[i]); spi_message_init(&cas_ctl->msg); cas_ctl->transfer.tx_nbits = 1; /* 1 MOSI line */ cas_ctl->transfer.rx_nbits = 1; /* 1 MISO line */ cas_ctl->transfer.speed_hz = 0; /* Use device setting */ cas_ctl->transfer.bits_per_word = 0; /* Use device setting */ cas_ctl->transfer.tx_buf = cas_ctl->tx_buf; cas_ctl->transfer.rx_buf = cas_ctl->tx_in_buf; cas_ctl->transfer.delay.value = 0; cas_ctl->transfer.delay.unit = SPI_DELAY_UNIT_USECS; cas_ctl->transfer.cs_change = 0; cas_ctl->transfer.len = sizeof(struct mac_message); cas_ctl->msg.complete = ca8210_spi_transfer_complete; cas_ctl->msg.context = cas_ctl; spi_message_add_tail( &cas_ctl->transfer, &cas_ctl->msg ); status = spi_async(spi, &cas_ctl->msg); if (status < 0) { dev_crit( &spi->dev, "status %d from spi_sync in write\n", status ); } return status; } /** * ca8210_spi_exchange() - Exchange API/SAP commands with the radio * @buf: Octet array of command being sent downstream * @len: length of buf * @response: buffer for storing synchronous response * @device_ref: spi_device pointer for ca8210 * * Effectively calls ca8210_spi_transfer to write buf[] to the spi, then for * synchronous commands waits for the corresponding response to be read from * the spi before returning. The response is written to the response parameter. 
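 * The downstream write is retried for as long as the bus returns -EBUSY, and
 * a synchronous confirm is waited on for up to CA8210_SYNC_TIMEOUT.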
* * Return: 0 or linux error code */ static int ca8210_spi_exchange( const u8 *buf, size_t len, u8 *response, void *device_ref ) { int status = 0; struct spi_device *spi = device_ref; struct ca8210_priv *priv = spi->dev.driver_data; long wait_remaining; if ((buf[0] & SPI_SYN) && response) { /* if sync wait for confirm */ reinit_completion(&priv->sync_exchange_complete); priv->sync_command_response = response; } do { reinit_completion(&priv->spi_transfer_complete); status = ca8210_spi_transfer(priv->spi, buf, len); if (status) { dev_warn( &spi->dev, "spi write failed, returned %d\n", status ); if (status == -EBUSY) continue; if (((buf[0] & SPI_SYN) && response)) complete(&priv->sync_exchange_complete); goto cleanup; } wait_remaining = wait_for_completion_interruptible_timeout( &priv->spi_transfer_complete, msecs_to_jiffies(1000) ); if (wait_remaining == -ERESTARTSYS) { status = -ERESTARTSYS; } else if (wait_remaining == 0) { dev_err( &spi->dev, "SPI downstream transfer timed out!\n" ); status = -ETIME; goto cleanup; } } while (status < 0); if (!((buf[0] & SPI_SYN) && response)) goto cleanup; wait_remaining = wait_for_completion_interruptible_timeout( &priv->sync_exchange_complete, msecs_to_jiffies(CA8210_SYNC_TIMEOUT) ); if (wait_remaining == -ERESTARTSYS) { status = -ERESTARTSYS; } else if (wait_remaining == 0) { dev_err( &spi->dev, "Synchronous confirm timeout\n" ); status = -ETIME; } cleanup: priv->sync_command_response = NULL; return status; } /** * ca8210_interrupt_handler() - Called when an irq is received from the ca8210 * @irq: Id of the irq being handled * @dev_id: Pointer passed by the system, pointing to the ca8210's private data * * This function is called when the irq line from the ca8210 is asserted, * signifying that the ca8210 has a message to send upstream to us. Starts the * asynchronous spi read. 
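 * The read is issued as a ca8210_spi_transfer() with no payload (all SPI_IDLE
 * bytes), which clocks the pending frame out of the device into the duplex
 * receive buffer.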
* * Return: irq return code */ static irqreturn_t ca8210_interrupt_handler(int irq, void *dev_id) { struct ca8210_priv *priv = dev_id; int status; dev_dbg(&priv->spi->dev, "irq: Interrupt occurred\n"); do { status = ca8210_spi_transfer(priv->spi, NULL, 0); if (status && (status != -EBUSY)) { dev_warn( &priv->spi->dev, "spi read failed, returned %d\n", status ); } } while (status == -EBUSY); return IRQ_HANDLED; } static int (*cascoda_api_downstream)( const u8 *buf, size_t len, u8 *response, void *device_ref ) = ca8210_spi_exchange; /* Cascoda API / 15.4 SAP Primitives */ /** * tdme_setsfr_request_sync() - TDME_SETSFR_request/confirm according to API * @sfr_page: SFR Page * @sfr_address: SFR Address * @sfr_value: SFR Value * @device_ref: Nondescript pointer to target device * * Return: 802.15.4 status code of TDME-SETSFR.confirm */ static u8 tdme_setsfr_request_sync( u8 sfr_page, u8 sfr_address, u8 sfr_value, void *device_ref ) { int ret; struct mac_message command, response; struct spi_device *spi = device_ref; command.command_id = SPI_TDME_SETSFR_REQUEST; command.length = 3; command.pdata.tdme_set_sfr_req.sfr_page = sfr_page; command.pdata.tdme_set_sfr_req.sfr_address = sfr_address; command.pdata.tdme_set_sfr_req.sfr_value = sfr_value; response.command_id = SPI_IDLE; ret = cascoda_api_downstream( &command.command_id, command.length + 2, &response.command_id, device_ref ); if (ret) { dev_crit(&spi->dev, "cascoda_api_downstream returned %d", ret); return IEEE802154_SYSTEM_ERROR; } if (response.command_id != SPI_TDME_SETSFR_CONFIRM) { dev_crit( &spi->dev, "sync response to SPI_TDME_SETSFR_REQUEST was not SPI_TDME_SETSFR_CONFIRM, it was %d\n", response.command_id ); return IEEE802154_SYSTEM_ERROR; } return response.pdata.tdme_set_sfr_cnf.status; } /** * tdme_chipinit() - TDME Chip Register Default Initialisation Macro * @device_ref: Nondescript pointer to target device * * Return: 802.15.4 status code of API calls */ static u8 tdme_chipinit(void *device_ref) { u8 status = IEEE802154_SUCCESS; u8 sfr_address; struct spi_device *spi = device_ref; struct preamble_cfg_sfr pre_cfg_value = { .timeout_symbols = 3, .acquisition_symbols = 3, .search_symbols = 1, }; /* LNA Gain Settings */ status = tdme_setsfr_request_sync( 1, (sfr_address = CA8210_SFR_LNAGX40), LNAGX40_DEFAULT_GAIN, device_ref); if (status) goto finish; status = tdme_setsfr_request_sync( 1, (sfr_address = CA8210_SFR_LNAGX41), LNAGX41_DEFAULT_GAIN, device_ref); if (status) goto finish; status = tdme_setsfr_request_sync( 1, (sfr_address = CA8210_SFR_LNAGX42), LNAGX42_DEFAULT_GAIN, device_ref); if (status) goto finish; status = tdme_setsfr_request_sync( 1, (sfr_address = CA8210_SFR_LNAGX43), LNAGX43_DEFAULT_GAIN, device_ref); if (status) goto finish; status = tdme_setsfr_request_sync( 1, (sfr_address = CA8210_SFR_LNAGX44), LNAGX44_DEFAULT_GAIN, device_ref); if (status) goto finish; status = tdme_setsfr_request_sync( 1, (sfr_address = CA8210_SFR_LNAGX45), LNAGX45_DEFAULT_GAIN, device_ref); if (status) goto finish; status = tdme_setsfr_request_sync( 1, (sfr_address = CA8210_SFR_LNAGX46), LNAGX46_DEFAULT_GAIN, device_ref); if (status) goto finish; status = tdme_setsfr_request_sync( 1, (sfr_address = CA8210_SFR_LNAGX47), LNAGX47_DEFAULT_GAIN, device_ref); if (status) goto finish; /* Preamble Timing Config */ status = tdme_setsfr_request_sync( 1, (sfr_address = CA8210_SFR_PRECFG), *((u8 *)&pre_cfg_value), device_ref); if (status) goto finish; /* Preamble Threshold High */ status = tdme_setsfr_request_sync( 1, (sfr_address = CA8210_SFR_PTHRH), 
PTHRH_DEFAULT_THRESHOLD, device_ref); if (status) goto finish; /* Tx Output Power 8 dBm */ status = tdme_setsfr_request_sync( 0, (sfr_address = CA8210_SFR_PACFGIB), PACFGIB_DEFAULT_CURRENT, device_ref); if (status) goto finish; finish: if (status != IEEE802154_SUCCESS) { dev_err( &spi->dev, "failed to set sfr at %#03x, status = %#03x\n", sfr_address, status ); } return status; } /** * tdme_channelinit() - TDME Channel Register Default Initialisation Macro (Tx) * @channel: 802.15.4 channel to initialise chip for * @device_ref: Nondescript pointer to target device * * Return: 802.15.4 status code of API calls */ static u8 tdme_channelinit(u8 channel, void *device_ref) { /* Transceiver front-end local oscillator tx two-point calibration * value. Tuned for the hardware. */ u8 txcalval; if (channel >= 25) txcalval = 0xA7; else if (channel >= 23) txcalval = 0xA8; else if (channel >= 22) txcalval = 0xA9; else if (channel >= 20) txcalval = 0xAA; else if (channel >= 17) txcalval = 0xAB; else if (channel >= 16) txcalval = 0xAC; else if (channel >= 14) txcalval = 0xAD; else if (channel >= 12) txcalval = 0xAE; else txcalval = 0xAF; return tdme_setsfr_request_sync( 1, CA8210_SFR_LOTXCAL, txcalval, device_ref ); /* LO Tx Cal */ } /** * tdme_checkpibattribute() - Checks Attribute Values that are not checked in * MAC * @pib_attribute: Attribute Number * @pib_attribute_length: Attribute length * @pib_attribute_value: Pointer to Attribute Value * * Return: 802.15.4 status code of checks */ static u8 tdme_checkpibattribute( u8 pib_attribute, u8 pib_attribute_length, const void *pib_attribute_value ) { u8 status = IEEE802154_SUCCESS; u8 value; value = *((u8 *)pib_attribute_value); switch (pib_attribute) { /* PHY */ case PHY_TRANSMIT_POWER: if (value > 0x3F) status = IEEE802154_INVALID_PARAMETER; break; case PHY_CCA_MODE: if (value > 0x03) status = IEEE802154_INVALID_PARAMETER; break; /* MAC */ case MAC_BATT_LIFE_EXT_PERIODS: if (value < 6 || value > 41) status = IEEE802154_INVALID_PARAMETER; break; case MAC_BEACON_PAYLOAD: if (pib_attribute_length > MAX_BEACON_PAYLOAD_LENGTH) status = IEEE802154_INVALID_PARAMETER; break; case MAC_BEACON_PAYLOAD_LENGTH: if (value > MAX_BEACON_PAYLOAD_LENGTH) status = IEEE802154_INVALID_PARAMETER; break; case MAC_BEACON_ORDER: if (value > 15) status = IEEE802154_INVALID_PARAMETER; break; case MAC_MAX_BE: if (value < 3 || value > 8) status = IEEE802154_INVALID_PARAMETER; break; case MAC_MAX_CSMA_BACKOFFS: if (value > 5) status = IEEE802154_INVALID_PARAMETER; break; case MAC_MAX_FRAME_RETRIES: if (value > 7) status = IEEE802154_INVALID_PARAMETER; break; case MAC_MIN_BE: if (value > 8) status = IEEE802154_INVALID_PARAMETER; break; case MAC_RESPONSE_WAIT_TIME: if (value < 2 || value > 64) status = IEEE802154_INVALID_PARAMETER; break; case MAC_SUPERFRAME_ORDER: if (value > 15) status = IEEE802154_INVALID_PARAMETER; break; /* boolean */ case MAC_ASSOCIATED_PAN_COORD: case MAC_ASSOCIATION_PERMIT: case MAC_AUTO_REQUEST: case MAC_BATT_LIFE_EXT: case MAC_GTS_PERMIT: case MAC_PROMISCUOUS_MODE: case MAC_RX_ON_WHEN_IDLE: case MAC_SECURITY_ENABLED: if (value > 1) status = IEEE802154_INVALID_PARAMETER; break; /* MAC SEC */ case MAC_AUTO_REQUEST_SECURITY_LEVEL: if (value > 7) status = IEEE802154_INVALID_PARAMETER; break; case MAC_AUTO_REQUEST_KEY_ID_MODE: if (value > 3) status = IEEE802154_INVALID_PARAMETER; break; default: break; } return status; } /** * tdme_settxpower() - Sets the tx power for MLME_SET phyTransmitPower * @txp: Transmit Power * @device_ref: Nondescript pointer to target 
device * * Normalised to 802.15.4 Definition (6-bit, signed): * Bit 7-6: not used * Bit 5-0: tx power (-32 - +31 dB) * * Return: 802.15.4 status code of api calls */ static u8 tdme_settxpower(u8 txp, void *device_ref) { u8 status; s8 txp_val; u8 txp_ext; union pa_cfg_sfr pa_cfg_val; /* extend from 6 to 8 bit */ txp_ext = 0x3F & txp; if (txp_ext & 0x20) txp_ext += 0xC0; txp_val = (s8)txp_ext; if (CA8210_MAC_MPW) { if (txp_val > 0) { /* 8 dBm: ptrim = 5, itrim = +3 => +4 dBm */ pa_cfg_val.bias_current_trim = 3; pa_cfg_val.buffer_capacitor_trim = 5; pa_cfg_val.boost = 1; } else { /* 0 dBm: ptrim = 7, itrim = +3 => -6 dBm */ pa_cfg_val.bias_current_trim = 3; pa_cfg_val.buffer_capacitor_trim = 7; pa_cfg_val.boost = 0; } /* write PACFG */ status = tdme_setsfr_request_sync( 0, CA8210_SFR_PACFG, pa_cfg_val.paib, device_ref ); } else { /* Look-Up Table for Setting Current and Frequency Trim values * for desired Output Power */ if (txp_val > 8) { pa_cfg_val.paib = 0x3F; } else if (txp_val == 8) { pa_cfg_val.paib = 0x32; } else if (txp_val == 7) { pa_cfg_val.paib = 0x22; } else if (txp_val == 6) { pa_cfg_val.paib = 0x18; } else if (txp_val == 5) { pa_cfg_val.paib = 0x10; } else if (txp_val == 4) { pa_cfg_val.paib = 0x0C; } else if (txp_val == 3) { pa_cfg_val.paib = 0x08; } else if (txp_val == 2) { pa_cfg_val.paib = 0x05; } else if (txp_val == 1) { pa_cfg_val.paib = 0x03; } else if (txp_val == 0) { pa_cfg_val.paib = 0x01; } else { /* < 0 */ pa_cfg_val.paib = 0x00; } /* write PACFGIB */ status = tdme_setsfr_request_sync( 0, CA8210_SFR_PACFGIB, pa_cfg_val.paib, device_ref ); } return status; } /** * mcps_data_request() - mcps_data_request (Send Data) according to API Spec * @src_addr_mode: Source Addressing Mode * @dst_address_mode: Destination Addressing Mode * @dst_pan_id: Destination PAN ID * @dst_addr: Pointer to Destination Address * @msdu_length: length of Data * @msdu: Pointer to Data * @msdu_handle: Handle of Data * @tx_options: Tx Options Bit Field * @security: Pointer to Security Structure or NULL * @device_ref: Nondescript pointer to target device * * Return: 802.15.4 status code of action */ static u8 mcps_data_request( u8 src_addr_mode, u8 dst_address_mode, u16 dst_pan_id, union macaddr *dst_addr, u8 msdu_length, u8 *msdu, u8 msdu_handle, u8 tx_options, struct secspec *security, void *device_ref ) { struct secspec *psec; struct mac_message command; command.command_id = SPI_MCPS_DATA_REQUEST; command.pdata.data_req.src_addr_mode = src_addr_mode; command.pdata.data_req.dst.mode = dst_address_mode; if (dst_address_mode != MAC_MODE_NO_ADDR) { command.pdata.data_req.dst.pan_id[0] = LS_BYTE(dst_pan_id); command.pdata.data_req.dst.pan_id[1] = MS_BYTE(dst_pan_id); if (dst_address_mode == MAC_MODE_SHORT_ADDR) { command.pdata.data_req.dst.address[0] = LS_BYTE( dst_addr->short_address ); command.pdata.data_req.dst.address[1] = MS_BYTE( dst_addr->short_address ); } else { /* MAC_MODE_LONG_ADDR*/ memcpy( command.pdata.data_req.dst.address, dst_addr->ieee_address, 8 ); } } command.pdata.data_req.msdu_length = msdu_length; command.pdata.data_req.msdu_handle = msdu_handle; command.pdata.data_req.tx_options = tx_options; memcpy(command.pdata.data_req.msdu, msdu, msdu_length); psec = (struct secspec *)(command.pdata.data_req.msdu + msdu_length); command.length = sizeof(struct mcps_data_request_pset) - MAX_DATA_SIZE + msdu_length; if (!security || security->security_level == 0) { psec->security_level = 0; command.length += 1; } else { *psec = *security; command.length += sizeof(struct secspec); } if 
(ca8210_spi_transfer(device_ref, &command.command_id, command.length + 2)) return IEEE802154_SYSTEM_ERROR; return IEEE802154_SUCCESS; } /** * mlme_reset_request_sync() - MLME_RESET_request/confirm according to API Spec * @set_default_pib: Set defaults in PIB * @device_ref: Nondescript pointer to target device * * Return: 802.15.4 status code of MLME-RESET.confirm */ static u8 mlme_reset_request_sync( u8 set_default_pib, void *device_ref ) { u8 status; struct mac_message command, response; struct spi_device *spi = device_ref; command.command_id = SPI_MLME_RESET_REQUEST; command.length = 1; command.pdata.u8param = set_default_pib; if (cascoda_api_downstream( &command.command_id, command.length + 2, &response.command_id, device_ref)) { dev_err(&spi->dev, "cascoda_api_downstream failed\n"); return IEEE802154_SYSTEM_ERROR; } if (response.command_id != SPI_MLME_RESET_CONFIRM) return IEEE802154_SYSTEM_ERROR; status = response.pdata.status; /* reset COORD Bit for Channel Filtering as Coordinator */ if (CA8210_MAC_WORKAROUNDS && set_default_pib && !status) { status = tdme_setsfr_request_sync( 0, CA8210_SFR_MACCON, 0, device_ref ); } return status; } /** * mlme_set_request_sync() - MLME_SET_request/confirm according to API Spec * @pib_attribute: Attribute Number * @pib_attribute_index: Index within Attribute if an Array * @pib_attribute_length: Attribute length * @pib_attribute_value: Pointer to Attribute Value * @device_ref: Nondescript pointer to target device * * Return: 802.15.4 status code of MLME-SET.confirm */ static u8 mlme_set_request_sync( u8 pib_attribute, u8 pib_attribute_index, u8 pib_attribute_length, const void *pib_attribute_value, void *device_ref ) { u8 status; struct mac_message command, response; /* pre-check the validity of pib_attribute values that are not checked * in MAC */ if (tdme_checkpibattribute( pib_attribute, pib_attribute_length, pib_attribute_value)) { return IEEE802154_INVALID_PARAMETER; } if (pib_attribute == PHY_CURRENT_CHANNEL) { status = tdme_channelinit( *((u8 *)pib_attribute_value), device_ref ); if (status) return status; } if (pib_attribute == PHY_TRANSMIT_POWER) { return tdme_settxpower( *((u8 *)pib_attribute_value), device_ref ); } command.command_id = SPI_MLME_SET_REQUEST; command.length = sizeof(struct mlme_set_request_pset) - MAX_ATTRIBUTE_SIZE + pib_attribute_length; command.pdata.set_req.pib_attribute = pib_attribute; command.pdata.set_req.pib_attribute_index = pib_attribute_index; command.pdata.set_req.pib_attribute_length = pib_attribute_length; memcpy( command.pdata.set_req.pib_attribute_value, pib_attribute_value, pib_attribute_length ); if (cascoda_api_downstream( &command.command_id, command.length + 2, &response.command_id, device_ref)) { return IEEE802154_SYSTEM_ERROR; } if (response.command_id != SPI_MLME_SET_CONFIRM) return IEEE802154_SYSTEM_ERROR; return response.pdata.status; } /** * hwme_set_request_sync() - HWME_SET_request/confirm according to API Spec * @hw_attribute: Attribute Number * @hw_attribute_length: Attribute length * @hw_attribute_value: Pointer to Attribute Value * @device_ref: Nondescript pointer to target device * * Return: 802.15.4 status code of HWME-SET.confirm */ static u8 hwme_set_request_sync( u8 hw_attribute, u8 hw_attribute_length, u8 *hw_attribute_value, void *device_ref ) { struct mac_message command, response; command.command_id = SPI_HWME_SET_REQUEST; command.length = 2 + hw_attribute_length; command.pdata.hwme_set_req.hw_attribute = hw_attribute; command.pdata.hwme_set_req.hw_attribute_length = 
	hw_attribute_length;
	memcpy(
		command.pdata.hwme_set_req.hw_attribute_value,
		hw_attribute_value,
		hw_attribute_length
	);

	if (cascoda_api_downstream(
		&command.command_id,
		command.length + 2,
		&response.command_id,
		device_ref)) {
		return IEEE802154_SYSTEM_ERROR;
	}

	if (response.command_id != SPI_HWME_SET_CONFIRM)
		return IEEE802154_SYSTEM_ERROR;

	return response.pdata.hwme_set_cnf.status;
}

/**
 * hwme_get_request_sync() - HWME_GET_request/confirm according to API Spec
 * @hw_attribute:        Attribute Number
 * @hw_attribute_length: Attribute length
 * @hw_attribute_value:  Pointer to Attribute Value
 * @device_ref:          Nondescript pointer to target device
 *
 * Return: 802.15.4 status code of HWME-GET.confirm
 */
static u8 hwme_get_request_sync(
	u8    hw_attribute,
	u8   *hw_attribute_length,
	u8   *hw_attribute_value,
	void *device_ref
)
{
	struct mac_message command, response;

	command.command_id = SPI_HWME_GET_REQUEST;
	command.length = 1;
	command.pdata.hwme_get_req.hw_attribute = hw_attribute;

	if (cascoda_api_downstream(
		&command.command_id,
		command.length + 2,
		&response.command_id,
		device_ref)) {
		return IEEE802154_SYSTEM_ERROR;
	}

	if (response.command_id != SPI_HWME_GET_CONFIRM)
		return IEEE802154_SYSTEM_ERROR;

	if (response.pdata.hwme_get_cnf.status == IEEE802154_SUCCESS) {
		*hw_attribute_length =
			response.pdata.hwme_get_cnf.hw_attribute_length;
		memcpy(
			hw_attribute_value,
			response.pdata.hwme_get_cnf.hw_attribute_value,
			*hw_attribute_length
		);
	}

	return response.pdata.hwme_get_cnf.status;
}

/* Network driver operation */

/**
 * ca8210_async_xmit_complete() - Called to announce that an asynchronous
 *                                transmission has finished
 * @hw:         ieee802154_hw of ca8210 that has finished exchange
 * @msduhandle: Identifier of transmission that has completed
 * @status:     Returned 802.15.4 status code of the transmission
 *
 * Return: 0 or linux error code
 */
static int ca8210_async_xmit_complete(
	struct ieee802154_hw *hw,
	u8                    msduhandle,
	u8                    status)
{
	struct ca8210_priv *priv = hw->priv;

	if (priv->nextmsduhandle != msduhandle) {
		dev_err(
			&priv->spi->dev,
			"Unexpected msdu_handle on data confirm, Expected %d, got %d\n",
			priv->nextmsduhandle,
			msduhandle
		);
		return -EIO;
	}

	priv->async_tx_pending = false;
	priv->nextmsduhandle++;

	if (status) {
		dev_err(
			&priv->spi->dev,
			"Link transmission unsuccessful, status = %d\n",
			status
		);
		if (status != IEEE802154_TRANSACTION_OVERFLOW) {
			ieee802154_xmit_error(priv->hw, priv->tx_skb, status);
			return 0;
		}
	}
	ieee802154_xmit_complete(priv->hw, priv->tx_skb, true);

	return 0;
}

/**
 * ca8210_skb_rx() - Constructs a properly framed socket buffer from a received
 *                   MCPS_DATA_indication
 * @hw:       ieee802154_hw that MCPS_DATA_indication was received by
 * @len:      length of MCPS_DATA_indication
 * @data_ind: Octet array of MCPS_DATA_indication
 *
 * Called by the spi driver whenever an MCPS_DATA_indication is received; this
 * function rebuilds the 802.15.4 MAC header from the indication, appends the
 * msdu payload and hands the resulting socket buffer up to the stack.
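 * Field offsets follow the Cascoda MCPS-DATA.indication layout: msdu_length
 * at byte 22, the MPDU link quality at byte 23 and the msdu itself from
 * byte 29.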
* * Return: 0 or linux error code */ static int ca8210_skb_rx( struct ieee802154_hw *hw, size_t len, u8 *data_ind ) { struct ieee802154_hdr hdr; int msdulen; int hlen; u8 mpdulinkquality = data_ind[23]; struct sk_buff *skb; struct ca8210_priv *priv = hw->priv; /* Allocate mtu size buffer for every rx packet */ skb = dev_alloc_skb(IEEE802154_MTU + sizeof(hdr)); if (!skb) return -ENOMEM; skb_reserve(skb, sizeof(hdr)); msdulen = data_ind[22]; /* msdu_length */ if (msdulen > IEEE802154_MTU) { dev_err( &priv->spi->dev, "received erroneously large msdu length!\n" ); kfree_skb(skb); return -EMSGSIZE; } dev_dbg(&priv->spi->dev, "skb buffer length = %d\n", msdulen); if (priv->promiscuous) goto copy_payload; /* Populate hdr */ hdr.sec.level = data_ind[29 + msdulen]; dev_dbg(&priv->spi->dev, "security level: %#03x\n", hdr.sec.level); if (hdr.sec.level > 0) { hdr.sec.key_id_mode = data_ind[30 + msdulen]; memcpy(&hdr.sec.extended_src, &data_ind[31 + msdulen], 8); hdr.sec.key_id = data_ind[39 + msdulen]; } hdr.source.mode = data_ind[0]; dev_dbg(&priv->spi->dev, "srcAddrMode: %#03x\n", hdr.source.mode); hdr.source.pan_id = *(u16 *)&data_ind[1]; dev_dbg(&priv->spi->dev, "srcPanId: %#06x\n", hdr.source.pan_id); memcpy(&hdr.source.extended_addr, &data_ind[3], 8); hdr.dest.mode = data_ind[11]; dev_dbg(&priv->spi->dev, "dstAddrMode: %#03x\n", hdr.dest.mode); hdr.dest.pan_id = *(u16 *)&data_ind[12]; dev_dbg(&priv->spi->dev, "dstPanId: %#06x\n", hdr.dest.pan_id); memcpy(&hdr.dest.extended_addr, &data_ind[14], 8); /* Fill in FC implicitly */ hdr.fc.type = 1; /* Data frame */ if (hdr.sec.level) hdr.fc.security_enabled = 1; else hdr.fc.security_enabled = 0; if (data_ind[1] != data_ind[12] || data_ind[2] != data_ind[13]) hdr.fc.intra_pan = 1; else hdr.fc.intra_pan = 0; hdr.fc.dest_addr_mode = hdr.dest.mode; hdr.fc.source_addr_mode = hdr.source.mode; /* Add hdr to front of buffer */ hlen = ieee802154_hdr_push(skb, &hdr); if (hlen < 0) { dev_crit(&priv->spi->dev, "failed to push mac hdr onto skb!\n"); kfree_skb(skb); return hlen; } skb_reset_mac_header(skb); skb->mac_len = hlen; copy_payload: /* Add <msdulen> bytes of space to the back of the buffer */ /* Copy msdu to skb */ skb_put_data(skb, &data_ind[29], msdulen); ieee802154_rx_irqsafe(hw, skb, mpdulinkquality); return 0; } /** * ca8210_net_rx() - Acts upon received SAP commands relevant to the network * driver * @hw: ieee802154_hw that command was received by * @command: Octet array of received command * @len: length of the received command * * Called by the spi driver whenever a SAP command is received, this function * will ascertain whether the command is of interest to the network driver and * take necessary action. 
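 * Duplicate MCPS-DATA.indications are dropped by comparing the DSN at byte 26
 * of the command against the last DSN seen.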
* * Return: 0 or linux error code */ static int ca8210_net_rx(struct ieee802154_hw *hw, u8 *command, size_t len) { struct ca8210_priv *priv = hw->priv; unsigned long flags; u8 status; dev_dbg(&priv->spi->dev, "%s: CmdID = %d\n", __func__, command[0]); if (command[0] == SPI_MCPS_DATA_INDICATION) { /* Received data */ spin_lock_irqsave(&priv->lock, flags); if (command[26] == priv->last_dsn) { dev_dbg( &priv->spi->dev, "DSN %d resend received, ignoring...\n", command[26] ); spin_unlock_irqrestore(&priv->lock, flags); return 0; } priv->last_dsn = command[26]; spin_unlock_irqrestore(&priv->lock, flags); return ca8210_skb_rx(hw, len - 2, command + 2); } else if (command[0] == SPI_MCPS_DATA_CONFIRM) { status = command[3]; if (priv->async_tx_pending) { return ca8210_async_xmit_complete( hw, command[2], status ); } } return 0; } /** * ca8210_skb_tx() - Transmits a given socket buffer using the ca8210 * @skb: Socket buffer to transmit * @msduhandle: Data identifier to pass to the 802.15.4 MAC * @priv: Pointer to private data section of target ca8210 * * Return: 0 or linux error code */ static int ca8210_skb_tx( struct sk_buff *skb, u8 msduhandle, struct ca8210_priv *priv ) { struct ieee802154_hdr header = { }; struct secspec secspec; int mac_len, status; dev_dbg(&priv->spi->dev, "%s called\n", __func__); /* Get addressing info from skb - ieee802154 layer creates a full * packet */ mac_len = ieee802154_hdr_peek_addrs(skb, &header); if (mac_len < 0) return mac_len; secspec.security_level = header.sec.level; secspec.key_id_mode = header.sec.key_id_mode; if (secspec.key_id_mode == 2) memcpy(secspec.key_source, &header.sec.short_src, 4); else if (secspec.key_id_mode == 3) memcpy(secspec.key_source, &header.sec.extended_src, 8); secspec.key_index = header.sec.key_id; /* Pass to Cascoda API */ status = mcps_data_request( header.source.mode, header.dest.mode, header.dest.pan_id, (union macaddr *)&header.dest.extended_addr, skb->len - mac_len, &skb->data[mac_len], msduhandle, header.fc.ack_request, &secspec, priv->spi ); return link_to_linux_err(status); } /** * ca8210_start() - Starts the network driver * @hw: ieee802154_hw of ca8210 being started * * Return: 0 or linux error code */ static int ca8210_start(struct ieee802154_hw *hw) { int status; u8 rx_on_when_idle; u8 lqi_threshold = 0; struct ca8210_priv *priv = hw->priv; priv->last_dsn = -1; /* Turn receiver on when idle for now just to test rx */ rx_on_when_idle = 1; status = mlme_set_request_sync( MAC_RX_ON_WHEN_IDLE, 0, 1, &rx_on_when_idle, priv->spi ); if (status) { dev_crit( &priv->spi->dev, "Setting rx_on_when_idle failed, status = %d\n", status ); return link_to_linux_err(status); } status = hwme_set_request_sync( HWME_LQILIMIT, 1, &lqi_threshold, priv->spi ); if (status) { dev_crit( &priv->spi->dev, "Setting lqilimit failed, status = %d\n", status ); return link_to_linux_err(status); } return 0; } /** * ca8210_stop() - Stops the network driver * @hw: ieee802154_hw of ca8210 being stopped * * Return: 0 or linux error code */ static void ca8210_stop(struct ieee802154_hw *hw) { } /** * ca8210_xmit_async() - Asynchronously transmits a given socket buffer using * the ca8210 * @hw: ieee802154_hw of ca8210 to transmit from * @skb: Socket buffer to transmit * * Return: 0 or linux error code */ static int ca8210_xmit_async(struct ieee802154_hw *hw, struct sk_buff *skb) { struct ca8210_priv *priv = hw->priv; int status; dev_dbg(&priv->spi->dev, "calling %s\n", __func__); priv->tx_skb = skb; priv->async_tx_pending = true; status = ca8210_skb_tx(skb, 
priv->nextmsduhandle, priv); return status; } /** * ca8210_get_ed() - Returns the measured energy on the current channel at this * instant in time * @hw: ieee802154_hw of target ca8210 * @level: Measured Energy Detect level * * Return: 0 or linux error code */ static int ca8210_get_ed(struct ieee802154_hw *hw, u8 *level) { u8 lenvar; struct ca8210_priv *priv = hw->priv; return link_to_linux_err( hwme_get_request_sync(HWME_EDVALUE, &lenvar, level, priv->spi) ); } /** * ca8210_set_channel() - Sets the current operating 802.15.4 channel of the * ca8210 * @hw: ieee802154_hw of target ca8210 * @page: Channel page to set * @channel: Channel number to set * * Return: 0 or linux error code */ static int ca8210_set_channel( struct ieee802154_hw *hw, u8 page, u8 channel ) { u8 status; struct ca8210_priv *priv = hw->priv; status = mlme_set_request_sync( PHY_CURRENT_CHANNEL, 0, 1, &channel, priv->spi ); if (status) { dev_err( &priv->spi->dev, "error setting channel, MLME-SET.confirm status = %d\n", status ); } return link_to_linux_err(status); } /** * ca8210_set_hw_addr_filt() - Sets the address filtering parameters of the * ca8210 * @hw: ieee802154_hw of target ca8210 * @filt: Filtering parameters * @changed: Bitmap representing which parameters to change * * Effectively just sets the actual addressing information identifying this node * as all filtering is performed by the ca8210 as detailed in the IEEE 802.15.4 * 2006 specification. * * Return: 0 or linux error code */ static int ca8210_set_hw_addr_filt( struct ieee802154_hw *hw, struct ieee802154_hw_addr_filt *filt, unsigned long changed ) { u8 status = 0; struct ca8210_priv *priv = hw->priv; if (changed & IEEE802154_AFILT_PANID_CHANGED) { status = mlme_set_request_sync( MAC_PAN_ID, 0, 2, &filt->pan_id, priv->spi ); if (status) { dev_err( &priv->spi->dev, "error setting pan id, MLME-SET.confirm status = %d", status ); return link_to_linux_err(status); } } if (changed & IEEE802154_AFILT_SADDR_CHANGED) { status = mlme_set_request_sync( MAC_SHORT_ADDRESS, 0, 2, &filt->short_addr, priv->spi ); if (status) { dev_err( &priv->spi->dev, "error setting short address, MLME-SET.confirm status = %d", status ); return link_to_linux_err(status); } } if (changed & IEEE802154_AFILT_IEEEADDR_CHANGED) { status = mlme_set_request_sync( NS_IEEE_ADDRESS, 0, 8, &filt->ieee_addr, priv->spi ); if (status) { dev_err( &priv->spi->dev, "error setting ieee address, MLME-SET.confirm status = %d", status ); return link_to_linux_err(status); } } /* TODO: Should use MLME_START to set coord bit? 
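	 * (currently the COORD bit is only ever cleared, via CA8210_SFR_MACCON
	 * in mlme_reset_request_sync())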
*/ return 0; } /** * ca8210_set_tx_power() - Sets the transmit power of the ca8210 * @hw: ieee802154_hw of target ca8210 * @mbm: Transmit power in mBm (dBm*100) * * Return: 0 or linux error code */ static int ca8210_set_tx_power(struct ieee802154_hw *hw, s32 mbm) { struct ca8210_priv *priv = hw->priv; mbm /= 100; return link_to_linux_err( mlme_set_request_sync(PHY_TRANSMIT_POWER, 0, 1, &mbm, priv->spi) ); } /** * ca8210_set_cca_mode() - Sets the clear channel assessment mode of the ca8210 * @hw: ieee802154_hw of target ca8210 * @cca: CCA mode to set * * Return: 0 or linux error code */ static int ca8210_set_cca_mode( struct ieee802154_hw *hw, const struct wpan_phy_cca *cca ) { u8 status; u8 cca_mode; struct ca8210_priv *priv = hw->priv; cca_mode = cca->mode & 3; if (cca_mode == 3 && cca->opt == NL802154_CCA_OPT_ENERGY_CARRIER_OR) { /* cca_mode 0 == CS OR ED, 3 == CS AND ED */ cca_mode = 0; } status = mlme_set_request_sync( PHY_CCA_MODE, 0, 1, &cca_mode, priv->spi ); if (status) { dev_err( &priv->spi->dev, "error setting cca mode, MLME-SET.confirm status = %d", status ); } return link_to_linux_err(status); } /** * ca8210_set_cca_ed_level() - Sets the CCA ED level of the ca8210 * @hw: ieee802154_hw of target ca8210 * @level: ED level to set (in mbm) * * Sets the minimum threshold of measured energy above which the ca8210 will * back off and retry a transmission. * * Return: 0 or linux error code */ static int ca8210_set_cca_ed_level(struct ieee802154_hw *hw, s32 level) { u8 status; u8 ed_threshold = (level / 100) * 2 + 256; struct ca8210_priv *priv = hw->priv; status = hwme_set_request_sync( HWME_EDTHRESHOLD, 1, &ed_threshold, priv->spi ); if (status) { dev_err( &priv->spi->dev, "error setting ed threshold, HWME-SET.confirm status = %d", status ); } return link_to_linux_err(status); } /** * ca8210_set_csma_params() - Sets the CSMA parameters of the ca8210 * @hw: ieee802154_hw of target ca8210 * @min_be: Minimum backoff exponent when backing off a transmission * @max_be: Maximum backoff exponent when backing off a transmission * @retries: Number of times to retry after backing off * * Return: 0 or linux error code */ static int ca8210_set_csma_params( struct ieee802154_hw *hw, u8 min_be, u8 max_be, u8 retries ) { u8 status; struct ca8210_priv *priv = hw->priv; status = mlme_set_request_sync(MAC_MIN_BE, 0, 1, &min_be, priv->spi); if (status) { dev_err( &priv->spi->dev, "error setting min be, MLME-SET.confirm status = %d", status ); return link_to_linux_err(status); } status = mlme_set_request_sync(MAC_MAX_BE, 0, 1, &max_be, priv->spi); if (status) { dev_err( &priv->spi->dev, "error setting max be, MLME-SET.confirm status = %d", status ); return link_to_linux_err(status); } status = mlme_set_request_sync( MAC_MAX_CSMA_BACKOFFS, 0, 1, &retries, priv->spi ); if (status) { dev_err( &priv->spi->dev, "error setting max csma backoffs, MLME-SET.confirm status = %d", status ); } return link_to_linux_err(status); } /** * ca8210_set_frame_retries() - Sets the maximum frame retries of the ca8210 * @hw: ieee802154_hw of target ca8210 * @retries: Number of retries * * Sets the number of times to retry a transmission if no acknowledgment was * received from the other end when one was requested. 
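 * The value is written to the macMaxFrameRetries PIB attribute; values above
 * 7 are rejected by tdme_checkpibattribute().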
* * Return: 0 or linux error code */ static int ca8210_set_frame_retries(struct ieee802154_hw *hw, s8 retries) { u8 status; struct ca8210_priv *priv = hw->priv; status = mlme_set_request_sync( MAC_MAX_FRAME_RETRIES, 0, 1, &retries, priv->spi ); if (status) { dev_err( &priv->spi->dev, "error setting frame retries, MLME-SET.confirm status = %d", status ); } return link_to_linux_err(status); } static int ca8210_set_promiscuous_mode(struct ieee802154_hw *hw, const bool on) { u8 status; struct ca8210_priv *priv = hw->priv; status = mlme_set_request_sync( MAC_PROMISCUOUS_MODE, 0, 1, (const void *)&on, priv->spi ); if (status) { dev_err( &priv->spi->dev, "error setting promiscuous mode, MLME-SET.confirm status = %d", status ); } else { priv->promiscuous = on; } return link_to_linux_err(status); } static const struct ieee802154_ops ca8210_phy_ops = { .start = ca8210_start, .stop = ca8210_stop, .xmit_async = ca8210_xmit_async, .ed = ca8210_get_ed, .set_channel = ca8210_set_channel, .set_hw_addr_filt = ca8210_set_hw_addr_filt, .set_txpower = ca8210_set_tx_power, .set_cca_mode = ca8210_set_cca_mode, .set_cca_ed_level = ca8210_set_cca_ed_level, .set_csma_params = ca8210_set_csma_params, .set_frame_retries = ca8210_set_frame_retries, .set_promiscuous_mode = ca8210_set_promiscuous_mode }; /* Test/EVBME Interface */ /** * ca8210_test_int_open() - Opens the test interface to the userspace * @inodp: inode representation of file interface * @filp: file interface * * Return: 0 or linux error code */ static int ca8210_test_int_open(struct inode *inodp, struct file *filp) { struct ca8210_priv *priv = inodp->i_private; filp->private_data = priv; return 0; } /** * ca8210_test_check_upstream() - Checks a command received from the upstream * testing interface for required action * @buf: Buffer containing command to check * @device_ref: Nondescript pointer to target device * * Return: 0 or linux error code */ static int ca8210_test_check_upstream(u8 *buf, void *device_ref) { int ret; u8 response[CA8210_SPI_BUF_SIZE]; if (buf[0] == SPI_MLME_SET_REQUEST) { ret = tdme_checkpibattribute(buf[2], buf[4], buf + 5); if (ret) { response[0] = SPI_MLME_SET_CONFIRM; response[1] = 3; response[2] = IEEE802154_INVALID_PARAMETER; response[3] = buf[2]; response[4] = buf[3]; if (cascoda_api_upstream) cascoda_api_upstream(response, 5, device_ref); return ret; } } if (buf[0] == SPI_MLME_ASSOCIATE_REQUEST) { return tdme_channelinit(buf[2], device_ref); } else if (buf[0] == SPI_MLME_START_REQUEST) { return tdme_channelinit(buf[4], device_ref); } else if ( (buf[0] == SPI_MLME_SET_REQUEST) && (buf[2] == PHY_CURRENT_CHANNEL) ) { return tdme_channelinit(buf[5], device_ref); } else if ( (buf[0] == SPI_TDME_SET_REQUEST) && (buf[2] == TDME_CHANNEL) ) { return tdme_channelinit(buf[4], device_ref); } else if ( (CA8210_MAC_WORKAROUNDS) && (buf[0] == SPI_MLME_RESET_REQUEST) && (buf[2] == 1) ) { /* reset COORD Bit for Channel Filtering as Coordinator */ return tdme_setsfr_request_sync( 0, CA8210_SFR_MACCON, 0, device_ref ); } return 0; } /* End of EVBMECheckSerialCommand() */ /** * ca8210_test_int_user_write() - Called by a process in userspace to send a * message to the ca8210 drivers * @filp: file interface * @in_buf: Buffer containing message to write * @len: length of message * @off: file offset * * Return: 0 or linux error code */ static ssize_t ca8210_test_int_user_write( struct file *filp, const char __user *in_buf, size_t len, loff_t *off ) { int ret; struct ca8210_priv *priv = filp->private_data; u8 command[CA8210_SPI_BUF_SIZE]; 
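	/* Userspace must supply a complete downstream frame:
	 * [command id][length][payload...], with the write length equal to
	 * length + 2.
	 */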
memset(command, SPI_IDLE, 6); if (len > CA8210_SPI_BUF_SIZE || len < 2) { dev_warn( &priv->spi->dev, "userspace requested erroneous write length (%zu)\n", len ); return -EBADE; } ret = copy_from_user(command, in_buf, len); if (ret) { dev_err( &priv->spi->dev, "%d bytes could not be copied from userspace\n", ret ); return -EIO; } if (len != command[1] + 2) { dev_err( &priv->spi->dev, "write len does not match packet length field\n" ); return -EBADE; } ret = ca8210_test_check_upstream(command, priv->spi); if (ret == 0) { ret = ca8210_spi_exchange( command, command[1] + 2, NULL, priv->spi ); if (ret < 0) { /* effectively 0 bytes were written successfully */ dev_err( &priv->spi->dev, "spi exchange failed\n" ); return ret; } if (command[0] & SPI_SYN) priv->sync_down++; } return len; } /** * ca8210_test_int_user_read() - Called by a process in userspace to read a * message from the ca8210 drivers * @filp: file interface * @buf: Buffer to write message to * @len: length of message to read (ignored) * @offp: file offset * * If the O_NONBLOCK flag was set when opening the file then this function will * not block, i.e. it will return if the fifo is empty. Otherwise the function * will block, i.e. wait until new data arrives. * * Return: number of bytes read */ static ssize_t ca8210_test_int_user_read( struct file *filp, char __user *buf, size_t len, loff_t *offp ) { int i, cmdlen; struct ca8210_priv *priv = filp->private_data; unsigned char *fifo_buffer; unsigned long bytes_not_copied; if (filp->f_flags & O_NONBLOCK) { /* Non-blocking mode */ if (kfifo_is_empty(&priv->test.up_fifo)) return 0; } else { /* Blocking mode */ wait_event_interruptible( priv->test.readq, !kfifo_is_empty(&priv->test.up_fifo) ); } if (kfifo_out(&priv->test.up_fifo, &fifo_buffer, 4) != 4) { dev_err( &priv->spi->dev, "test_interface: Wrong number of elements popped from upstream fifo\n" ); return 0; } cmdlen = fifo_buffer[1]; bytes_not_copied = cmdlen + 2; bytes_not_copied = copy_to_user(buf, fifo_buffer, bytes_not_copied); if (bytes_not_copied > 0) { dev_err( &priv->spi->dev, "%lu bytes could not be copied to user space!\n", bytes_not_copied ); } dev_dbg(&priv->spi->dev, "test_interface: Cmd len = %d\n", cmdlen); dev_dbg(&priv->spi->dev, "test_interface: Read\n"); for (i = 0; i < cmdlen + 2; i++) dev_dbg(&priv->spi->dev, "%#03x\n", fifo_buffer[i]); kfree(fifo_buffer); return cmdlen + 2; } /** * ca8210_test_int_ioctl() - Called by a process in userspace to enact an * arbitrary action * @filp: file interface * @ioctl_num: which action to enact * @ioctl_param: arbitrary parameter for the action * * Return: status */ static long ca8210_test_int_ioctl( struct file *filp, unsigned int ioctl_num, unsigned long ioctl_param ) { struct ca8210_priv *priv = filp->private_data; switch (ioctl_num) { case CA8210_IOCTL_HARD_RESET: ca8210_reset_send(priv->spi, ioctl_param); break; default: break; } return 0; } /** * ca8210_test_int_poll() - Called by a process in userspace to determine which * actions are currently possible for the file * @filp: file interface * @ptable: poll table * * Return: set of poll return flags */ static __poll_t ca8210_test_int_poll( struct file *filp, struct poll_table_struct *ptable ) { __poll_t return_flags = 0; struct ca8210_priv *priv = filp->private_data; poll_wait(filp, &priv->test.readq, ptable); if (!kfifo_is_empty(&priv->test.up_fifo)) return_flags |= (EPOLLIN | EPOLLRDNORM); if (wait_event_interruptible( priv->test.readq, !kfifo_is_empty(&priv->test.up_fifo))) { return EPOLLERR; } return return_flags; } 
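/* File operations backing the debugfs node created by
 * ca8210_test_interface_init().
 */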
static const struct file_operations test_int_fops = { .read = ca8210_test_int_user_read, .write = ca8210_test_int_user_write, .open = ca8210_test_int_open, .release = NULL, .unlocked_ioctl = ca8210_test_int_ioctl, .poll = ca8210_test_int_poll }; /* Init/Deinit */ /** * ca8210_get_platform_data() - Populate a ca8210_platform_data object * @spi_device: Pointer to ca8210 spi device object to get data for * @pdata: Pointer to ca8210_platform_data object to populate * * Return: 0 or linux error code */ static int ca8210_get_platform_data( struct spi_device *spi_device, struct ca8210_platform_data *pdata ) { int ret = 0; if (!spi_device->dev.of_node) return -EINVAL; pdata->extclockenable = of_property_read_bool( spi_device->dev.of_node, "extclock-enable" ); if (pdata->extclockenable) { ret = of_property_read_u32( spi_device->dev.of_node, "extclock-freq", &pdata->extclockfreq ); if (ret < 0) return ret; ret = of_property_read_u32( spi_device->dev.of_node, "extclock-gpio", &pdata->extclockgpio ); } return ret; } /** * ca8210_config_extern_clk() - Configure the external clock provided by the * ca8210 * @pdata: Pointer to ca8210_platform_data containing clock parameters * @spi: Pointer to target ca8210 spi device * @on: True to turn the clock on, false to turn off * * The external clock is configured with a frequency and output pin taken from * the platform data. * * Return: 0 or linux error code */ static int ca8210_config_extern_clk( struct ca8210_platform_data *pdata, struct spi_device *spi, bool on ) { u8 clkparam[2]; if (on) { dev_info(&spi->dev, "Switching external clock on\n"); switch (pdata->extclockfreq) { case SIXTEEN_MHZ: clkparam[0] = 1; break; case EIGHT_MHZ: clkparam[0] = 2; break; case FOUR_MHZ: clkparam[0] = 3; break; case TWO_MHZ: clkparam[0] = 4; break; case ONE_MHZ: clkparam[0] = 5; break; default: dev_crit(&spi->dev, "Invalid extclock-freq\n"); return -EINVAL; } clkparam[1] = pdata->extclockgpio; } else { dev_info(&spi->dev, "Switching external clock off\n"); clkparam[0] = 0; /* off */ clkparam[1] = 0; } return link_to_linux_err( hwme_set_request_sync(HWME_SYSCLKOUT, 2, clkparam, spi) ); } /** * ca8210_register_ext_clock() - Register ca8210's external clock with kernel * @spi: Pointer to target ca8210 spi device * * Return: 0 or linux error code */ static int ca8210_register_ext_clock(struct spi_device *spi) { struct device_node *np = spi->dev.of_node; struct ca8210_priv *priv = spi_get_drvdata(spi); struct ca8210_platform_data *pdata = spi->dev.platform_data; int ret = 0; if (!np) return -EFAULT; priv->clk = clk_register_fixed_rate( &spi->dev, np->name, NULL, 0, pdata->extclockfreq ); if (IS_ERR(priv->clk)) { dev_crit(&spi->dev, "Failed to register external clk\n"); return PTR_ERR(priv->clk); } ret = of_clk_add_provider(np, of_clk_src_simple_get, priv->clk); if (ret) { clk_unregister(priv->clk); dev_crit( &spi->dev, "Failed to register external clock as clock provider\n" ); } else { dev_info(&spi->dev, "External clock set as clock provider\n"); } return ret; } /** * ca8210_unregister_ext_clock() - Unregister ca8210's external clock with * kernel * @spi: Pointer to target ca8210 spi device */ static void ca8210_unregister_ext_clock(struct spi_device *spi) { struct ca8210_priv *priv = spi_get_drvdata(spi); if (!priv->clk) return of_clk_del_provider(spi->dev.of_node); clk_unregister(priv->clk); dev_info(&spi->dev, "External clock unregistered\n"); } /** * ca8210_reset_init() - Initialise the reset input to the ca8210 * @spi: Pointer to target ca8210 spi device * * Return: 0 or 
linux error code */ static int ca8210_reset_init(struct spi_device *spi) { int ret; struct ca8210_platform_data *pdata = spi->dev.platform_data; pdata->gpio_reset = of_get_named_gpio( spi->dev.of_node, "reset-gpio", 0 ); ret = gpio_direction_output(pdata->gpio_reset, 1); if (ret < 0) { dev_crit( &spi->dev, "Reset GPIO %d did not set to output mode\n", pdata->gpio_reset ); } return ret; } /** * ca8210_interrupt_init() - Initialise the irq output from the ca8210 * @spi: Pointer to target ca8210 spi device * * Return: 0 or linux error code */ static int ca8210_interrupt_init(struct spi_device *spi) { int ret; struct ca8210_platform_data *pdata = spi->dev.platform_data; pdata->gpio_irq = of_get_named_gpio( spi->dev.of_node, "irq-gpio", 0 ); pdata->irq_id = gpio_to_irq(pdata->gpio_irq); if (pdata->irq_id < 0) { dev_crit( &spi->dev, "Could not get irq for gpio pin %d\n", pdata->gpio_irq ); gpio_free(pdata->gpio_irq); return pdata->irq_id; } ret = request_irq( pdata->irq_id, ca8210_interrupt_handler, IRQF_TRIGGER_FALLING, "ca8210-irq", spi_get_drvdata(spi) ); if (ret) { dev_crit(&spi->dev, "request_irq %d failed\n", pdata->irq_id); gpio_free(pdata->gpio_irq); } return ret; } /** * ca8210_dev_com_init() - Initialise the spi communication component * @priv: Pointer to private data structure * * Return: 0 or linux error code */ static int ca8210_dev_com_init(struct ca8210_priv *priv) { priv->mlme_workqueue = alloc_ordered_workqueue( "MLME work queue", WQ_UNBOUND ); if (!priv->mlme_workqueue) { dev_crit(&priv->spi->dev, "alloc of mlme_workqueue failed!\n"); return -ENOMEM; } priv->irq_workqueue = alloc_ordered_workqueue( "ca8210 irq worker", WQ_UNBOUND ); if (!priv->irq_workqueue) { dev_crit(&priv->spi->dev, "alloc of irq_workqueue failed!\n"); destroy_workqueue(priv->mlme_workqueue); return -ENOMEM; } return 0; } /** * ca8210_dev_com_clear() - Deinitialise the spi communication component * @priv: Pointer to private data structure */ static void ca8210_dev_com_clear(struct ca8210_priv *priv) { destroy_workqueue(priv->mlme_workqueue); destroy_workqueue(priv->irq_workqueue); } #define CA8210_MAX_TX_POWERS (9) static const s32 ca8210_tx_powers[CA8210_MAX_TX_POWERS] = { 800, 700, 600, 500, 400, 300, 200, 100, 0 }; #define CA8210_MAX_ED_LEVELS (21) static const s32 ca8210_ed_levels[CA8210_MAX_ED_LEVELS] = { -10300, -10250, -10200, -10150, -10100, -10050, -10000, -9950, -9900, -9850, -9800, -9750, -9700, -9650, -9600, -9550, -9500, -9450, -9400, -9350, -9300 }; /** * ca8210_hw_setup() - Populate the ieee802154_hw phy attributes with the * ca8210's defaults * @ca8210_hw: Pointer to ieee802154_hw to populate */ static void ca8210_hw_setup(struct ieee802154_hw *ca8210_hw) { /* Support channels 11-26 */ ca8210_hw->phy->supported.channels[0] = CA8210_VALID_CHANNELS; ca8210_hw->phy->supported.tx_powers_size = CA8210_MAX_TX_POWERS; ca8210_hw->phy->supported.tx_powers = ca8210_tx_powers; ca8210_hw->phy->supported.cca_ed_levels_size = CA8210_MAX_ED_LEVELS; ca8210_hw->phy->supported.cca_ed_levels = ca8210_ed_levels; ca8210_hw->phy->current_channel = 18; ca8210_hw->phy->current_page = 0; ca8210_hw->phy->transmit_power = 800; ca8210_hw->phy->cca.mode = NL802154_CCA_ENERGY_CARRIER; ca8210_hw->phy->cca.opt = NL802154_CCA_OPT_ENERGY_CARRIER_AND; ca8210_hw->phy->cca_ed_level = -9800; ca8210_hw->phy->symbol_duration = 16; ca8210_hw->phy->lifs_period = 40 * ca8210_hw->phy->symbol_duration; ca8210_hw->phy->sifs_period = 12 * ca8210_hw->phy->symbol_duration; ca8210_hw->flags = IEEE802154_HW_AFILT | IEEE802154_HW_OMIT_CKSUM | 
IEEE802154_HW_FRAME_RETRIES | IEEE802154_HW_PROMISCUOUS | IEEE802154_HW_CSMA_PARAMS; ca8210_hw->phy->flags = WPAN_PHY_FLAG_TXPOWER | WPAN_PHY_FLAG_CCA_ED_LEVEL | WPAN_PHY_FLAG_CCA_MODE | WPAN_PHY_FLAG_DATAGRAMS_ONLY; } /** * ca8210_test_interface_init() - Initialise the test file interface * @priv: Pointer to private data structure * * Provided as an alternative to the standard linux network interface, the test * interface exposes a file in the filesystem (ca8210_test) that allows * 802.15.4 SAP Commands and Cascoda EVBME commands to be sent directly to * the stack. * * Return: 0 or linux error code */ static int ca8210_test_interface_init(struct ca8210_priv *priv) { struct ca8210_test *test = &priv->test; char node_name[32]; snprintf( node_name, sizeof(node_name), "ca8210@%d_%d", priv->spi->master->bus_num, spi_get_chipselect(priv->spi, 0) ); test->ca8210_dfs_spi_int = debugfs_create_file( node_name, 0600, /* S_IRUSR | S_IWUSR */ NULL, priv, &test_int_fops ); debugfs_create_symlink("ca8210", NULL, node_name); init_waitqueue_head(&test->readq); return kfifo_alloc( &test->up_fifo, CA8210_TEST_INT_FIFO_SIZE, GFP_KERNEL ); } /** * ca8210_test_interface_clear() - Deinitialise the test file interface * @priv: Pointer to private data structure */ static void ca8210_test_interface_clear(struct ca8210_priv *priv) { struct ca8210_test *test = &priv->test; debugfs_remove(test->ca8210_dfs_spi_int); kfifo_free(&test->up_fifo); dev_info(&priv->spi->dev, "Test interface removed\n"); } /** * ca8210_remove() - Shut down a ca8210 upon being disconnected * @spi_device: Pointer to spi device data structure * * Return: 0 or linux error code */ static void ca8210_remove(struct spi_device *spi_device) { struct ca8210_priv *priv; struct ca8210_platform_data *pdata; dev_info(&spi_device->dev, "Removing ca8210\n"); pdata = spi_device->dev.platform_data; if (pdata) { if (pdata->extclockenable) { ca8210_unregister_ext_clock(spi_device); ca8210_config_extern_clk(pdata, spi_device, 0); } free_irq(pdata->irq_id, spi_device->dev.driver_data); kfree(pdata); spi_device->dev.platform_data = NULL; } /* get spi_device private data */ priv = spi_get_drvdata(spi_device); if (priv) { dev_info( &spi_device->dev, "sync_down = %d, sync_up = %d\n", priv->sync_down, priv->sync_up ); ca8210_dev_com_clear(spi_device->dev.driver_data); if (priv->hw) { if (priv->hw_registered) ieee802154_unregister_hw(priv->hw); ieee802154_free_hw(priv->hw); priv->hw = NULL; dev_info( &spi_device->dev, "Unregistered & freed ieee802154_hw.\n" ); } if (IS_ENABLED(CONFIG_IEEE802154_CA8210_DEBUGFS)) ca8210_test_interface_clear(priv); } } /** * ca8210_probe() - Set up a connected ca8210 upon being detected by the system * @spi_device: Pointer to spi device data structure * * Return: 0 or linux error code */ static int ca8210_probe(struct spi_device *spi_device) { struct ca8210_priv *priv; struct ieee802154_hw *hw; struct ca8210_platform_data *pdata; int ret; dev_info(&spi_device->dev, "Inserting ca8210\n"); /* allocate ieee802154_hw and private data */ hw = ieee802154_alloc_hw(sizeof(struct ca8210_priv), &ca8210_phy_ops); if (!hw) { dev_crit(&spi_device->dev, "ieee802154_alloc_hw failed\n"); ret = -ENOMEM; goto error; } priv = hw->priv; priv->hw = hw; priv->spi = spi_device; hw->parent = &spi_device->dev; spin_lock_init(&priv->lock); priv->async_tx_pending = false; priv->hw_registered = false; priv->sync_up = 0; priv->sync_down = 0; priv->promiscuous = false; priv->retries = 0; init_completion(&priv->ca8210_is_awake); 
init_completion(&priv->spi_transfer_complete); init_completion(&priv->sync_exchange_complete); spi_set_drvdata(priv->spi, priv); if (IS_ENABLED(CONFIG_IEEE802154_CA8210_DEBUGFS)) { cascoda_api_upstream = ca8210_test_int_driver_write; ca8210_test_interface_init(priv); } else { cascoda_api_upstream = NULL; } ca8210_hw_setup(hw); ieee802154_random_extended_addr(&hw->phy->perm_extended_addr); pdata = kmalloc(sizeof(*pdata), GFP_KERNEL); if (!pdata) { ret = -ENOMEM; goto error; } priv->spi->dev.platform_data = pdata; ret = ca8210_get_platform_data(priv->spi, pdata); if (ret) { dev_crit(&spi_device->dev, "ca8210_get_platform_data failed\n"); goto error; } ret = ca8210_dev_com_init(priv); if (ret) { dev_crit(&spi_device->dev, "ca8210_dev_com_init failed\n"); goto error; } ret = ca8210_reset_init(priv->spi); if (ret) { dev_crit(&spi_device->dev, "ca8210_reset_init failed\n"); goto error; } ret = ca8210_interrupt_init(priv->spi); if (ret) { dev_crit(&spi_device->dev, "ca8210_interrupt_init failed\n"); goto error; } msleep(100); ca8210_reset_send(priv->spi, 1); ret = tdme_chipinit(priv->spi); if (ret) { dev_crit(&spi_device->dev, "tdme_chipinit failed\n"); goto error; } if (pdata->extclockenable) { ret = ca8210_config_extern_clk(pdata, priv->spi, 1); if (ret) { dev_crit( &spi_device->dev, "ca8210_config_extern_clk failed\n" ); goto error; } ret = ca8210_register_ext_clock(priv->spi); if (ret) { dev_crit( &spi_device->dev, "ca8210_register_ext_clock failed\n" ); goto error; } } ret = ieee802154_register_hw(hw); if (ret) { dev_crit(&spi_device->dev, "ieee802154_register_hw failed\n"); goto error; } priv->hw_registered = true; return 0; error: msleep(100); /* wait for pending spi transfers to complete */ ca8210_remove(spi_device); return link_to_linux_err(ret); } static const struct of_device_id ca8210_of_ids[] = { {.compatible = "cascoda,ca8210", }, {}, }; MODULE_DEVICE_TABLE(of, ca8210_of_ids); static struct spi_driver ca8210_spi_driver = { .driver = { .name = DRIVER_NAME, .of_match_table = ca8210_of_ids, }, .probe = ca8210_probe, .remove = ca8210_remove }; module_spi_driver(ca8210_spi_driver); MODULE_AUTHOR("Harry Morris <[email protected]>"); MODULE_DESCRIPTION("CA-8210 SoftMAC driver"); MODULE_LICENSE("Dual BSD/GPL"); MODULE_VERSION("1.0");
linux-master
drivers/net/ieee802154/ca8210.c
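Both ca8210_probe() above and hwsim_add_one() in the next file follow the same mac802154 registration order: allocate the ieee802154_hw together with the driver-private area, describe the PHY, and only then call ieee802154_register_hw(), tearing down in the reverse order on the error and remove paths. The condensed sketch below shows just that skeleton; the demo_* names, the stubbed callbacks and the bare SPI glue are illustrative assumptions, not code from either driver.

#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/spi/spi.h>
#include <net/mac802154.h>
#include <net/cfg802154.h>

struct demo_priv {
	struct ieee802154_hw *hw;
	struct spi_device *spi;
};

/* minimal stubs: mac802154 expects start/stop/ed/set_channel and one xmit op */
static int demo_start(struct ieee802154_hw *hw) { return 0; }
static void demo_stop(struct ieee802154_hw *hw) { }
static int demo_ed(struct ieee802154_hw *hw, u8 *level) { *level = 0; return 0; }
static int demo_set_channel(struct ieee802154_hw *hw, u8 page, u8 channel) { return 0; }

static int demo_xmit(struct ieee802154_hw *hw, struct sk_buff *skb)
{
	/* a real driver hands the frame to the radio; the stub just completes it */
	ieee802154_xmit_complete(hw, skb, false);
	return 0;
}

static const struct ieee802154_ops demo_ops = {
	.owner = THIS_MODULE,
	.start = demo_start,
	.stop = demo_stop,
	.ed = demo_ed,
	.set_channel = demo_set_channel,
	.xmit_async = demo_xmit,
};

static int demo_probe(struct spi_device *spi)
{
	struct ieee802154_hw *hw;
	struct demo_priv *priv;
	int ret;

	/* 1. allocate ieee802154_hw with the driver-private data attached to it */
	hw = ieee802154_alloc_hw(sizeof(*priv), &demo_ops);
	if (!hw)
		return -ENOMEM;

	priv = hw->priv;
	priv->hw = hw;
	priv->spi = spi;
	hw->parent = &spi->dev;
	spi_set_drvdata(spi, priv);

	/* 2. describe the PHY before registering (channels 11-26, random EUI-64) */
	hw->phy->supported.channels[0] = 0x7FFF800;
	ieee802154_random_extended_addr(&hw->phy->perm_extended_addr);

	/* 3. register; from this point the stack may invoke demo_ops */
	ret = ieee802154_register_hw(hw);
	if (ret) {
		ieee802154_free_hw(hw);
		return ret;
	}

	return 0;
}

static void demo_remove(struct spi_device *spi)
{
	struct demo_priv *priv = spi_get_drvdata(spi);

	/* unregister before freeing, mirroring ca8210_remove() */
	ieee802154_unregister_hw(priv->hw);
	ieee802154_free_hw(priv->hw);
}

static struct spi_driver demo_spi_driver = {
	.driver = { .name = "demo-ieee802154" },
	.probe = demo_probe,
	.remove = demo_remove,
};
module_spi_driver(demo_spi_driver);

MODULE_DESCRIPTION("Skeleton IEEE 802.15.4 SoftMAC SPI driver (sketch)");
MODULE_LICENSE("GPL");

ca8210_probe() layers its own hardware bring-up (platform data, reset and IRQ GPIOs, optional external clock, the debugfs test interface) before the final ieee802154_register_hw() call, and funnels every error path through ca8210_remove(), which is why that function checks each resource before tearing it down.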
// SPDX-License-Identifier: GPL-2.0-only /* * HWSIM IEEE 802.15.4 interface * * (C) 2018 Mojatau, Alexander Aring <[email protected]> * Copyright 2007-2012 Siemens AG * * Based on fakelb, original Written by: * Sergey Lapin <[email protected]> * Dmitry Eremin-Solenikov <[email protected]> * Alexander Smirnov <[email protected]> */ #include <linux/module.h> #include <linux/timer.h> #include <linux/platform_device.h> #include <linux/rtnetlink.h> #include <linux/netdevice.h> #include <linux/device.h> #include <linux/spinlock.h> #include <net/ieee802154_netdev.h> #include <net/mac802154.h> #include <net/cfg802154.h> #include <net/genetlink.h> #include "mac802154_hwsim.h" MODULE_DESCRIPTION("Software simulator of IEEE 802.15.4 radio(s) for mac802154"); MODULE_LICENSE("GPL"); static LIST_HEAD(hwsim_phys); static DEFINE_MUTEX(hwsim_phys_lock); static struct platform_device *mac802154hwsim_dev; /* MAC802154_HWSIM netlink family */ static struct genl_family hwsim_genl_family; static int hwsim_radio_idx; enum hwsim_multicast_groups { HWSIM_MCGRP_CONFIG, }; static const struct genl_multicast_group hwsim_mcgrps[] = { [HWSIM_MCGRP_CONFIG] = { .name = "config", }, }; struct hwsim_pib { u8 page; u8 channel; struct ieee802154_hw_addr_filt filt; enum ieee802154_filtering_level filt_level; struct rcu_head rcu; }; struct hwsim_edge_info { u8 lqi; struct rcu_head rcu; }; struct hwsim_edge { struct hwsim_phy *endpoint; struct hwsim_edge_info __rcu *info; struct list_head list; struct rcu_head rcu; }; struct hwsim_phy { struct ieee802154_hw *hw; u32 idx; struct hwsim_pib __rcu *pib; bool suspended; struct list_head edges; struct list_head list; }; static int hwsim_add_one(struct genl_info *info, struct device *dev, bool init); static void hwsim_del(struct hwsim_phy *phy); static int hwsim_hw_ed(struct ieee802154_hw *hw, u8 *level) { *level = 0xbe; return 0; } static int hwsim_update_pib(struct ieee802154_hw *hw, u8 page, u8 channel, struct ieee802154_hw_addr_filt *filt, enum ieee802154_filtering_level filt_level) { struct hwsim_phy *phy = hw->priv; struct hwsim_pib *pib, *pib_old; pib = kzalloc(sizeof(*pib), GFP_ATOMIC); if (!pib) return -ENOMEM; pib_old = rtnl_dereference(phy->pib); pib->page = page; pib->channel = channel; pib->filt.short_addr = filt->short_addr; pib->filt.pan_id = filt->pan_id; pib->filt.ieee_addr = filt->ieee_addr; pib->filt.pan_coord = filt->pan_coord; pib->filt_level = filt_level; rcu_assign_pointer(phy->pib, pib); kfree_rcu(pib_old, rcu); return 0; } static int hwsim_hw_channel(struct ieee802154_hw *hw, u8 page, u8 channel) { struct hwsim_phy *phy = hw->priv; struct hwsim_pib *pib; int ret; rcu_read_lock(); pib = rcu_dereference(phy->pib); ret = hwsim_update_pib(hw, page, channel, &pib->filt, pib->filt_level); rcu_read_unlock(); return ret; } static int hwsim_hw_addr_filt(struct ieee802154_hw *hw, struct ieee802154_hw_addr_filt *filt, unsigned long changed) { struct hwsim_phy *phy = hw->priv; struct hwsim_pib *pib; int ret; rcu_read_lock(); pib = rcu_dereference(phy->pib); ret = hwsim_update_pib(hw, pib->page, pib->channel, filt, pib->filt_level); rcu_read_unlock(); return ret; } static void hwsim_hw_receive(struct ieee802154_hw *hw, struct sk_buff *skb, u8 lqi) { struct ieee802154_hdr hdr; struct hwsim_phy *phy = hw->priv; struct hwsim_pib *pib; rcu_read_lock(); pib = rcu_dereference(phy->pib); if (!pskb_may_pull(skb, 3)) { dev_dbg(hw->parent, "invalid frame\n"); goto drop; } memcpy(&hdr, skb->data, 3); /* Level 4 filtering: Frame fields validity */ if (pib->filt_level == 
IEEE802154_FILTERING_4_FRAME_FIELDS) { /* a) Drop reserved frame types */ switch (mac_cb(skb)->type) { case IEEE802154_FC_TYPE_BEACON: case IEEE802154_FC_TYPE_DATA: case IEEE802154_FC_TYPE_ACK: case IEEE802154_FC_TYPE_MAC_CMD: break; default: dev_dbg(hw->parent, "unrecognized frame type 0x%x\n", mac_cb(skb)->type); goto drop; } /* b) Drop reserved frame versions */ switch (hdr.fc.version) { case IEEE802154_2003_STD: case IEEE802154_2006_STD: case IEEE802154_STD: break; default: dev_dbg(hw->parent, "unrecognized frame version 0x%x\n", hdr.fc.version); goto drop; } /* c) PAN ID constraints */ if ((mac_cb(skb)->dest.mode == IEEE802154_ADDR_LONG || mac_cb(skb)->dest.mode == IEEE802154_ADDR_SHORT) && mac_cb(skb)->dest.pan_id != pib->filt.pan_id && mac_cb(skb)->dest.pan_id != cpu_to_le16(IEEE802154_PANID_BROADCAST)) { dev_dbg(hw->parent, "unrecognized PAN ID %04x\n", le16_to_cpu(mac_cb(skb)->dest.pan_id)); goto drop; } /* d1) Short address constraints */ if (mac_cb(skb)->dest.mode == IEEE802154_ADDR_SHORT && mac_cb(skb)->dest.short_addr != pib->filt.short_addr && mac_cb(skb)->dest.short_addr != cpu_to_le16(IEEE802154_ADDR_BROADCAST)) { dev_dbg(hw->parent, "unrecognized short address %04x\n", le16_to_cpu(mac_cb(skb)->dest.short_addr)); goto drop; } /* d2) Extended address constraints */ if (mac_cb(skb)->dest.mode == IEEE802154_ADDR_LONG && mac_cb(skb)->dest.extended_addr != pib->filt.ieee_addr) { dev_dbg(hw->parent, "unrecognized long address 0x%016llx\n", mac_cb(skb)->dest.extended_addr); goto drop; } /* d4) Specific PAN coordinator case (no parent) */ if ((mac_cb(skb)->type == IEEE802154_FC_TYPE_DATA || mac_cb(skb)->type == IEEE802154_FC_TYPE_MAC_CMD) && mac_cb(skb)->dest.mode == IEEE802154_ADDR_NONE) { dev_dbg(hw->parent, "relaying is not supported\n"); goto drop; } /* e) Beacon frames follow specific PAN ID rules */ if (mac_cb(skb)->type == IEEE802154_FC_TYPE_BEACON && pib->filt.pan_id != cpu_to_le16(IEEE802154_PANID_BROADCAST) && mac_cb(skb)->dest.pan_id != pib->filt.pan_id) { dev_dbg(hw->parent, "invalid beacon PAN ID %04x\n", le16_to_cpu(mac_cb(skb)->dest.pan_id)); goto drop; } } rcu_read_unlock(); ieee802154_rx_irqsafe(hw, skb, lqi); return; drop: rcu_read_unlock(); kfree_skb(skb); } static int hwsim_hw_xmit(struct ieee802154_hw *hw, struct sk_buff *skb) { struct hwsim_phy *current_phy = hw->priv; struct hwsim_pib *current_pib, *endpoint_pib; struct hwsim_edge_info *einfo; struct hwsim_edge *e; WARN_ON(current_phy->suspended); rcu_read_lock(); current_pib = rcu_dereference(current_phy->pib); list_for_each_entry_rcu(e, &current_phy->edges, list) { /* Can be changed later in rx_irqsafe, but this is only a * performance tweak. Received radio should drop the frame * in mac802154 stack anyway... 
so we don't need to be * 100% of locking here to check on suspended */ if (e->endpoint->suspended) continue; endpoint_pib = rcu_dereference(e->endpoint->pib); if (current_pib->page == endpoint_pib->page && current_pib->channel == endpoint_pib->channel) { struct sk_buff *newskb = pskb_copy(skb, GFP_ATOMIC); einfo = rcu_dereference(e->info); if (newskb) hwsim_hw_receive(e->endpoint->hw, newskb, einfo->lqi); } } rcu_read_unlock(); ieee802154_xmit_complete(hw, skb, false); return 0; } static int hwsim_hw_start(struct ieee802154_hw *hw) { struct hwsim_phy *phy = hw->priv; phy->suspended = false; return 0; } static void hwsim_hw_stop(struct ieee802154_hw *hw) { struct hwsim_phy *phy = hw->priv; phy->suspended = true; } static int hwsim_set_promiscuous_mode(struct ieee802154_hw *hw, const bool on) { enum ieee802154_filtering_level filt_level; struct hwsim_phy *phy = hw->priv; struct hwsim_pib *pib; int ret; if (on) filt_level = IEEE802154_FILTERING_NONE; else filt_level = IEEE802154_FILTERING_4_FRAME_FIELDS; rcu_read_lock(); pib = rcu_dereference(phy->pib); ret = hwsim_update_pib(hw, pib->page, pib->channel, &pib->filt, filt_level); rcu_read_unlock(); return ret; } static const struct ieee802154_ops hwsim_ops = { .owner = THIS_MODULE, .xmit_async = hwsim_hw_xmit, .ed = hwsim_hw_ed, .set_channel = hwsim_hw_channel, .start = hwsim_hw_start, .stop = hwsim_hw_stop, .set_promiscuous_mode = hwsim_set_promiscuous_mode, .set_hw_addr_filt = hwsim_hw_addr_filt, }; static int hwsim_new_radio_nl(struct sk_buff *msg, struct genl_info *info) { return hwsim_add_one(info, &mac802154hwsim_dev->dev, false); } static int hwsim_del_radio_nl(struct sk_buff *msg, struct genl_info *info) { struct hwsim_phy *phy, *tmp; s64 idx = -1; if (!info->attrs[MAC802154_HWSIM_ATTR_RADIO_ID]) return -EINVAL; idx = nla_get_u32(info->attrs[MAC802154_HWSIM_ATTR_RADIO_ID]); mutex_lock(&hwsim_phys_lock); list_for_each_entry_safe(phy, tmp, &hwsim_phys, list) { if (idx == phy->idx) { hwsim_del(phy); mutex_unlock(&hwsim_phys_lock); return 0; } } mutex_unlock(&hwsim_phys_lock); return -ENODEV; } static int append_radio_msg(struct sk_buff *skb, struct hwsim_phy *phy) { struct nlattr *nl_edges, *nl_edge; struct hwsim_edge_info *einfo; struct hwsim_edge *e; int ret; ret = nla_put_u32(skb, MAC802154_HWSIM_ATTR_RADIO_ID, phy->idx); if (ret < 0) return ret; rcu_read_lock(); if (list_empty(&phy->edges)) { rcu_read_unlock(); return 0; } nl_edges = nla_nest_start_noflag(skb, MAC802154_HWSIM_ATTR_RADIO_EDGES); if (!nl_edges) { rcu_read_unlock(); return -ENOBUFS; } list_for_each_entry_rcu(e, &phy->edges, list) { nl_edge = nla_nest_start_noflag(skb, MAC802154_HWSIM_ATTR_RADIO_EDGE); if (!nl_edge) { rcu_read_unlock(); nla_nest_cancel(skb, nl_edges); return -ENOBUFS; } ret = nla_put_u32(skb, MAC802154_HWSIM_EDGE_ATTR_ENDPOINT_ID, e->endpoint->idx); if (ret < 0) { rcu_read_unlock(); nla_nest_cancel(skb, nl_edge); nla_nest_cancel(skb, nl_edges); return ret; } einfo = rcu_dereference(e->info); ret = nla_put_u8(skb, MAC802154_HWSIM_EDGE_ATTR_LQI, einfo->lqi); if (ret < 0) { rcu_read_unlock(); nla_nest_cancel(skb, nl_edge); nla_nest_cancel(skb, nl_edges); return ret; } nla_nest_end(skb, nl_edge); } rcu_read_unlock(); nla_nest_end(skb, nl_edges); return 0; } static int hwsim_get_radio(struct sk_buff *skb, struct hwsim_phy *phy, u32 portid, u32 seq, struct netlink_callback *cb, int flags) { void *hdr; int res; hdr = genlmsg_put(skb, portid, seq, &hwsim_genl_family, flags, MAC802154_HWSIM_CMD_GET_RADIO); if (!hdr) return -EMSGSIZE; if (cb) 
genl_dump_check_consistent(cb, hdr); res = append_radio_msg(skb, phy); if (res < 0) goto out_err; genlmsg_end(skb, hdr); return 0; out_err: genlmsg_cancel(skb, hdr); return res; } static int hwsim_get_radio_nl(struct sk_buff *msg, struct genl_info *info) { struct hwsim_phy *phy; struct sk_buff *skb; int idx, res = -ENODEV; if (!info->attrs[MAC802154_HWSIM_ATTR_RADIO_ID]) return -EINVAL; idx = nla_get_u32(info->attrs[MAC802154_HWSIM_ATTR_RADIO_ID]); mutex_lock(&hwsim_phys_lock); list_for_each_entry(phy, &hwsim_phys, list) { if (phy->idx != idx) continue; skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_ATOMIC); if (!skb) { res = -ENOMEM; goto out_err; } res = hwsim_get_radio(skb, phy, info->snd_portid, info->snd_seq, NULL, 0); if (res < 0) { nlmsg_free(skb); goto out_err; } res = genlmsg_reply(skb, info); break; } out_err: mutex_unlock(&hwsim_phys_lock); return res; } static int hwsim_dump_radio_nl(struct sk_buff *skb, struct netlink_callback *cb) { int idx = cb->args[0]; struct hwsim_phy *phy; int res; mutex_lock(&hwsim_phys_lock); if (idx == hwsim_radio_idx) goto done; list_for_each_entry(phy, &hwsim_phys, list) { if (phy->idx < idx) continue; res = hwsim_get_radio(skb, phy, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq, cb, NLM_F_MULTI); if (res < 0) break; idx = phy->idx + 1; } cb->args[0] = idx; done: mutex_unlock(&hwsim_phys_lock); return skb->len; } /* caller need to held hwsim_phys_lock */ static struct hwsim_phy *hwsim_get_radio_by_id(uint32_t idx) { struct hwsim_phy *phy; list_for_each_entry(phy, &hwsim_phys, list) { if (phy->idx == idx) return phy; } return NULL; } static const struct nla_policy hwsim_edge_policy[MAC802154_HWSIM_EDGE_ATTR_MAX + 1] = { [MAC802154_HWSIM_EDGE_ATTR_ENDPOINT_ID] = { .type = NLA_U32 }, [MAC802154_HWSIM_EDGE_ATTR_LQI] = { .type = NLA_U8 }, }; static struct hwsim_edge *hwsim_alloc_edge(struct hwsim_phy *endpoint, u8 lqi) { struct hwsim_edge_info *einfo; struct hwsim_edge *e; e = kzalloc(sizeof(*e), GFP_KERNEL); if (!e) return NULL; einfo = kzalloc(sizeof(*einfo), GFP_KERNEL); if (!einfo) { kfree(e); return NULL; } einfo->lqi = 0xff; rcu_assign_pointer(e->info, einfo); e->endpoint = endpoint; return e; } static void hwsim_free_edge(struct hwsim_edge *e) { struct hwsim_edge_info *einfo; rcu_read_lock(); einfo = rcu_dereference(e->info); rcu_read_unlock(); kfree_rcu(einfo, rcu); kfree_rcu(e, rcu); } static int hwsim_new_edge_nl(struct sk_buff *msg, struct genl_info *info) { struct nlattr *edge_attrs[MAC802154_HWSIM_EDGE_ATTR_MAX + 1]; struct hwsim_phy *phy_v0, *phy_v1; struct hwsim_edge *e; u32 v0, v1; if (!info->attrs[MAC802154_HWSIM_ATTR_RADIO_ID] || !info->attrs[MAC802154_HWSIM_ATTR_RADIO_EDGE]) return -EINVAL; if (nla_parse_nested_deprecated(edge_attrs, MAC802154_HWSIM_EDGE_ATTR_MAX, info->attrs[MAC802154_HWSIM_ATTR_RADIO_EDGE], hwsim_edge_policy, NULL)) return -EINVAL; if (!edge_attrs[MAC802154_HWSIM_EDGE_ATTR_ENDPOINT_ID]) return -EINVAL; v0 = nla_get_u32(info->attrs[MAC802154_HWSIM_ATTR_RADIO_ID]); v1 = nla_get_u32(edge_attrs[MAC802154_HWSIM_EDGE_ATTR_ENDPOINT_ID]); if (v0 == v1) return -EINVAL; mutex_lock(&hwsim_phys_lock); phy_v0 = hwsim_get_radio_by_id(v0); if (!phy_v0) { mutex_unlock(&hwsim_phys_lock); return -ENOENT; } phy_v1 = hwsim_get_radio_by_id(v1); if (!phy_v1) { mutex_unlock(&hwsim_phys_lock); return -ENOENT; } rcu_read_lock(); list_for_each_entry_rcu(e, &phy_v0->edges, list) { if (e->endpoint->idx == v1) { mutex_unlock(&hwsim_phys_lock); rcu_read_unlock(); return -EEXIST; } } rcu_read_unlock(); e = hwsim_alloc_edge(phy_v1, 0xff); if (!e) { 
mutex_unlock(&hwsim_phys_lock); return -ENOMEM; } list_add_rcu(&e->list, &phy_v0->edges); /* wait until changes are done under hwsim_phys_lock lock * should prevent of calling this function twice while * edges list has not the changes yet. */ synchronize_rcu(); mutex_unlock(&hwsim_phys_lock); return 0; } static int hwsim_del_edge_nl(struct sk_buff *msg, struct genl_info *info) { struct nlattr *edge_attrs[MAC802154_HWSIM_EDGE_ATTR_MAX + 1]; struct hwsim_phy *phy_v0; struct hwsim_edge *e; u32 v0, v1; if (!info->attrs[MAC802154_HWSIM_ATTR_RADIO_ID] || !info->attrs[MAC802154_HWSIM_ATTR_RADIO_EDGE]) return -EINVAL; if (nla_parse_nested_deprecated(edge_attrs, MAC802154_HWSIM_EDGE_ATTR_MAX, info->attrs[MAC802154_HWSIM_ATTR_RADIO_EDGE], hwsim_edge_policy, NULL)) return -EINVAL; if (!edge_attrs[MAC802154_HWSIM_EDGE_ATTR_ENDPOINT_ID]) return -EINVAL; v0 = nla_get_u32(info->attrs[MAC802154_HWSIM_ATTR_RADIO_ID]); v1 = nla_get_u32(edge_attrs[MAC802154_HWSIM_EDGE_ATTR_ENDPOINT_ID]); mutex_lock(&hwsim_phys_lock); phy_v0 = hwsim_get_radio_by_id(v0); if (!phy_v0) { mutex_unlock(&hwsim_phys_lock); return -ENOENT; } rcu_read_lock(); list_for_each_entry_rcu(e, &phy_v0->edges, list) { if (e->endpoint->idx == v1) { rcu_read_unlock(); list_del_rcu(&e->list); hwsim_free_edge(e); /* same again - wait until list changes are done */ synchronize_rcu(); mutex_unlock(&hwsim_phys_lock); return 0; } } rcu_read_unlock(); mutex_unlock(&hwsim_phys_lock); return -ENOENT; } static int hwsim_set_edge_lqi(struct sk_buff *msg, struct genl_info *info) { struct nlattr *edge_attrs[MAC802154_HWSIM_EDGE_ATTR_MAX + 1]; struct hwsim_edge_info *einfo, *einfo_old; struct hwsim_phy *phy_v0; struct hwsim_edge *e; u32 v0, v1; u8 lqi; if (!info->attrs[MAC802154_HWSIM_ATTR_RADIO_ID] || !info->attrs[MAC802154_HWSIM_ATTR_RADIO_EDGE]) return -EINVAL; if (nla_parse_nested_deprecated(edge_attrs, MAC802154_HWSIM_EDGE_ATTR_MAX, info->attrs[MAC802154_HWSIM_ATTR_RADIO_EDGE], hwsim_edge_policy, NULL)) return -EINVAL; if (!edge_attrs[MAC802154_HWSIM_EDGE_ATTR_ENDPOINT_ID] || !edge_attrs[MAC802154_HWSIM_EDGE_ATTR_LQI]) return -EINVAL; v0 = nla_get_u32(info->attrs[MAC802154_HWSIM_ATTR_RADIO_ID]); v1 = nla_get_u32(edge_attrs[MAC802154_HWSIM_EDGE_ATTR_ENDPOINT_ID]); lqi = nla_get_u8(edge_attrs[MAC802154_HWSIM_EDGE_ATTR_LQI]); mutex_lock(&hwsim_phys_lock); phy_v0 = hwsim_get_radio_by_id(v0); if (!phy_v0) { mutex_unlock(&hwsim_phys_lock); return -ENOENT; } einfo = kzalloc(sizeof(*einfo), GFP_KERNEL); if (!einfo) { mutex_unlock(&hwsim_phys_lock); return -ENOMEM; } rcu_read_lock(); list_for_each_entry_rcu(e, &phy_v0->edges, list) { if (e->endpoint->idx == v1) { einfo->lqi = lqi; einfo_old = rcu_replace_pointer(e->info, einfo, lockdep_is_held(&hwsim_phys_lock)); rcu_read_unlock(); kfree_rcu(einfo_old, rcu); mutex_unlock(&hwsim_phys_lock); return 0; } } rcu_read_unlock(); kfree(einfo); mutex_unlock(&hwsim_phys_lock); return -ENOENT; } /* MAC802154_HWSIM netlink policy */ static const struct nla_policy hwsim_genl_policy[MAC802154_HWSIM_ATTR_MAX + 1] = { [MAC802154_HWSIM_ATTR_RADIO_ID] = { .type = NLA_U32 }, [MAC802154_HWSIM_ATTR_RADIO_EDGE] = { .type = NLA_NESTED }, [MAC802154_HWSIM_ATTR_RADIO_EDGES] = { .type = NLA_NESTED }, }; /* Generic Netlink operations array */ static const struct genl_small_ops hwsim_nl_ops[] = { { .cmd = MAC802154_HWSIM_CMD_NEW_RADIO, .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, .doit = hwsim_new_radio_nl, .flags = GENL_UNS_ADMIN_PERM, }, { .cmd = MAC802154_HWSIM_CMD_DEL_RADIO, .validate = GENL_DONT_VALIDATE_STRICT | 
GENL_DONT_VALIDATE_DUMP, .doit = hwsim_del_radio_nl, .flags = GENL_UNS_ADMIN_PERM, }, { .cmd = MAC802154_HWSIM_CMD_GET_RADIO, .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, .doit = hwsim_get_radio_nl, .dumpit = hwsim_dump_radio_nl, }, { .cmd = MAC802154_HWSIM_CMD_NEW_EDGE, .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, .doit = hwsim_new_edge_nl, .flags = GENL_UNS_ADMIN_PERM, }, { .cmd = MAC802154_HWSIM_CMD_DEL_EDGE, .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, .doit = hwsim_del_edge_nl, .flags = GENL_UNS_ADMIN_PERM, }, { .cmd = MAC802154_HWSIM_CMD_SET_EDGE, .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, .doit = hwsim_set_edge_lqi, .flags = GENL_UNS_ADMIN_PERM, }, }; static struct genl_family hwsim_genl_family __ro_after_init = { .name = "MAC802154_HWSIM", .version = 1, .maxattr = MAC802154_HWSIM_ATTR_MAX, .policy = hwsim_genl_policy, .module = THIS_MODULE, .small_ops = hwsim_nl_ops, .n_small_ops = ARRAY_SIZE(hwsim_nl_ops), .resv_start_op = MAC802154_HWSIM_CMD_NEW_EDGE + 1, .mcgrps = hwsim_mcgrps, .n_mcgrps = ARRAY_SIZE(hwsim_mcgrps), }; static void hwsim_mcast_config_msg(struct sk_buff *mcast_skb, struct genl_info *info) { if (info) genl_notify(&hwsim_genl_family, mcast_skb, info, HWSIM_MCGRP_CONFIG, GFP_KERNEL); else genlmsg_multicast(&hwsim_genl_family, mcast_skb, 0, HWSIM_MCGRP_CONFIG, GFP_KERNEL); } static void hwsim_mcast_new_radio(struct genl_info *info, struct hwsim_phy *phy) { struct sk_buff *mcast_skb; void *data; mcast_skb = genlmsg_new(GENLMSG_DEFAULT_SIZE, GFP_KERNEL); if (!mcast_skb) return; data = genlmsg_put(mcast_skb, 0, 0, &hwsim_genl_family, 0, MAC802154_HWSIM_CMD_NEW_RADIO); if (!data) goto out_err; if (append_radio_msg(mcast_skb, phy) < 0) goto out_err; genlmsg_end(mcast_skb, data); hwsim_mcast_config_msg(mcast_skb, info); return; out_err: genlmsg_cancel(mcast_skb, data); nlmsg_free(mcast_skb); } static void hwsim_edge_unsubscribe_me(struct hwsim_phy *phy) { struct hwsim_phy *tmp; struct hwsim_edge *e; rcu_read_lock(); /* going to all phy edges and remove phy from it */ list_for_each_entry(tmp, &hwsim_phys, list) { list_for_each_entry_rcu(e, &tmp->edges, list) { if (e->endpoint->idx == phy->idx) { list_del_rcu(&e->list); hwsim_free_edge(e); } } } rcu_read_unlock(); synchronize_rcu(); } static int hwsim_subscribe_all_others(struct hwsim_phy *phy) { struct hwsim_phy *sub; struct hwsim_edge *e; list_for_each_entry(sub, &hwsim_phys, list) { e = hwsim_alloc_edge(sub, 0xff); if (!e) goto me_fail; list_add_rcu(&e->list, &phy->edges); } list_for_each_entry(sub, &hwsim_phys, list) { e = hwsim_alloc_edge(phy, 0xff); if (!e) goto sub_fail; list_add_rcu(&e->list, &sub->edges); } return 0; sub_fail: hwsim_edge_unsubscribe_me(phy); me_fail: rcu_read_lock(); list_for_each_entry_rcu(e, &phy->edges, list) { list_del_rcu(&e->list); hwsim_free_edge(e); } rcu_read_unlock(); return -ENOMEM; } static int hwsim_add_one(struct genl_info *info, struct device *dev, bool init) { struct ieee802154_hw *hw; struct hwsim_phy *phy; struct hwsim_pib *pib; int idx; int err; idx = hwsim_radio_idx++; hw = ieee802154_alloc_hw(sizeof(*phy), &hwsim_ops); if (!hw) return -ENOMEM; phy = hw->priv; phy->hw = hw; /* 868 MHz BPSK 802.15.4-2003 */ hw->phy->supported.channels[0] |= 1; /* 915 MHz BPSK 802.15.4-2003 */ hw->phy->supported.channels[0] |= 0x7fe; /* 2.4 GHz O-QPSK 802.15.4-2003 */ hw->phy->supported.channels[0] |= 0x7FFF800; /* 868 MHz ASK 802.15.4-2006 */ hw->phy->supported.channels[1] |= 1; /* 915 MHz ASK 802.15.4-2006 */ 
hw->phy->supported.channels[1] |= 0x7fe; /* 868 MHz O-QPSK 802.15.4-2006 */ hw->phy->supported.channels[2] |= 1; /* 915 MHz O-QPSK 802.15.4-2006 */ hw->phy->supported.channels[2] |= 0x7fe; /* 2.4 GHz CSS 802.15.4a-2007 */ hw->phy->supported.channels[3] |= 0x3fff; /* UWB Sub-gigahertz 802.15.4a-2007 */ hw->phy->supported.channels[4] |= 1; /* UWB Low band 802.15.4a-2007 */ hw->phy->supported.channels[4] |= 0x1e; /* UWB High band 802.15.4a-2007 */ hw->phy->supported.channels[4] |= 0xffe0; /* 750 MHz O-QPSK 802.15.4c-2009 */ hw->phy->supported.channels[5] |= 0xf; /* 750 MHz MPSK 802.15.4c-2009 */ hw->phy->supported.channels[5] |= 0xf0; /* 950 MHz BPSK 802.15.4d-2009 */ hw->phy->supported.channels[6] |= 0x3ff; /* 950 MHz GFSK 802.15.4d-2009 */ hw->phy->supported.channels[6] |= 0x3ffc00; ieee802154_random_extended_addr(&hw->phy->perm_extended_addr); /* hwsim phy channel 13 as default */ hw->phy->current_channel = 13; pib = kzalloc(sizeof(*pib), GFP_KERNEL); if (!pib) { err = -ENOMEM; goto err_pib; } pib->channel = 13; pib->filt.short_addr = cpu_to_le16(IEEE802154_ADDR_BROADCAST); pib->filt.pan_id = cpu_to_le16(IEEE802154_PANID_BROADCAST); rcu_assign_pointer(phy->pib, pib); phy->idx = idx; INIT_LIST_HEAD(&phy->edges); hw->flags = IEEE802154_HW_PROMISCUOUS; hw->parent = dev; err = ieee802154_register_hw(hw); if (err) goto err_reg; mutex_lock(&hwsim_phys_lock); if (init) { err = hwsim_subscribe_all_others(phy); if (err < 0) { mutex_unlock(&hwsim_phys_lock); goto err_subscribe; } } list_add_tail(&phy->list, &hwsim_phys); mutex_unlock(&hwsim_phys_lock); hwsim_mcast_new_radio(info, phy); return idx; err_subscribe: ieee802154_unregister_hw(phy->hw); err_reg: kfree(pib); err_pib: ieee802154_free_hw(phy->hw); return err; } static void hwsim_del(struct hwsim_phy *phy) { struct hwsim_pib *pib; struct hwsim_edge *e; hwsim_edge_unsubscribe_me(phy); list_del(&phy->list); rcu_read_lock(); list_for_each_entry_rcu(e, &phy->edges, list) { list_del_rcu(&e->list); hwsim_free_edge(e); } pib = rcu_dereference(phy->pib); rcu_read_unlock(); kfree_rcu(pib, rcu); ieee802154_unregister_hw(phy->hw); ieee802154_free_hw(phy->hw); } static int hwsim_probe(struct platform_device *pdev) { struct hwsim_phy *phy, *tmp; int err, i; for (i = 0; i < 2; i++) { err = hwsim_add_one(NULL, &pdev->dev, true); if (err < 0) goto err_slave; } dev_info(&pdev->dev, "Added 2 mac802154 hwsim hardware radios\n"); return 0; err_slave: mutex_lock(&hwsim_phys_lock); list_for_each_entry_safe(phy, tmp, &hwsim_phys, list) hwsim_del(phy); mutex_unlock(&hwsim_phys_lock); return err; } static int hwsim_remove(struct platform_device *pdev) { struct hwsim_phy *phy, *tmp; mutex_lock(&hwsim_phys_lock); list_for_each_entry_safe(phy, tmp, &hwsim_phys, list) hwsim_del(phy); mutex_unlock(&hwsim_phys_lock); return 0; } static struct platform_driver mac802154hwsim_driver = { .probe = hwsim_probe, .remove = hwsim_remove, .driver = { .name = "mac802154_hwsim", }, }; static __init int hwsim_init_module(void) { int rc; rc = genl_register_family(&hwsim_genl_family); if (rc) return rc; mac802154hwsim_dev = platform_device_register_simple("mac802154_hwsim", -1, NULL, 0); if (IS_ERR(mac802154hwsim_dev)) { rc = PTR_ERR(mac802154hwsim_dev); goto platform_dev; } rc = platform_driver_register(&mac802154hwsim_driver); if (rc < 0) goto platform_drv; return 0; platform_drv: platform_device_unregister(mac802154hwsim_dev); platform_dev: genl_unregister_family(&hwsim_genl_family); return rc; } static __exit void hwsim_remove_module(void) { genl_unregister_family(&hwsim_genl_family); 
platform_driver_unregister(&mac802154hwsim_driver); platform_device_unregister(mac802154hwsim_dev); } module_init(hwsim_init_module); module_exit(hwsim_remove_module);
linux-master
drivers/net/ieee802154/mac802154_hwsim.c
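mac802154_hwsim keeps its per-radio PIB and per-edge LQI behind RCU so the receive path never takes a lock: hwsim_update_pib() and hwsim_set_edge_lqi() allocate a fresh copy, publish it with rcu_assign_pointer()/rcu_replace_pointer(), and hand the old copy to kfree_rcu(), while hwsim_hw_receive() and hwsim_hw_xmit() only hold rcu_read_lock(). A standalone sketch of that copy/publish/reclaim pattern follows, under illustrative demo_* names rather than the driver's own structures.

#include <linux/types.h>
#include <linux/slab.h>
#include <linux/mutex.h>
#include <linux/rcupdate.h>

struct demo_cfg {
	u8 page;
	u8 channel;
	struct rcu_head rcu;
};

static struct demo_cfg __rcu *demo_active;
static DEFINE_MUTEX(demo_cfg_lock);	/* serializes writers only */

/* writer: copy, publish, then reclaim the old copy after a grace period */
static int demo_cfg_set(u8 page, u8 channel)
{
	struct demo_cfg *new_cfg, *old_cfg;

	new_cfg = kzalloc(sizeof(*new_cfg), GFP_KERNEL);
	if (!new_cfg)
		return -ENOMEM;

	new_cfg->page = page;
	new_cfg->channel = channel;

	mutex_lock(&demo_cfg_lock);
	old_cfg = rcu_replace_pointer(demo_active, new_cfg,
				      lockdep_is_held(&demo_cfg_lock));
	mutex_unlock(&demo_cfg_lock);

	if (old_cfg)
		kfree_rcu(old_cfg, rcu);	/* freed once all readers are done */

	return 0;
}

/* reader: lockless snapshot, safe against a concurrent demo_cfg_set() */
static bool demo_cfg_match(u8 page, u8 channel)
{
	struct demo_cfg *cfg;
	bool match = false;

	rcu_read_lock();
	cfg = rcu_dereference(demo_active);
	if (cfg)
		match = cfg->page == page && cfg->channel == channel;
	rcu_read_unlock();

	return match;
}

The driver differs from this sketch mainly in where the writer-side lock comes from (the RTNL in hwsim_update_pib(), hwsim_phys_lock in hwsim_set_edge_lqi()) and in copying the existing address-filter fields into the new object before publishing it.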
// SPDX-License-Identifier: GPL-2.0-only /* * AT86RF230/RF231 driver * * Copyright (C) 2009-2012 Siemens AG * * Written by: * Dmitry Eremin-Solenikov <[email protected]> * Alexander Smirnov <[email protected]> * Alexander Aring <[email protected]> */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/hrtimer.h> #include <linux/jiffies.h> #include <linux/interrupt.h> #include <linux/irq.h> #include <linux/gpio.h> #include <linux/delay.h> #include <linux/property.h> #include <linux/spi/spi.h> #include <linux/regmap.h> #include <linux/skbuff.h> #include <linux/of_gpio.h> #include <linux/ieee802154.h> #include <net/mac802154.h> #include <net/cfg802154.h> #include "at86rf230.h" struct at86rf230_local; /* at86rf2xx chip depend data. * All timings are in us. */ struct at86rf2xx_chip_data { u16 t_sleep_cycle; u16 t_channel_switch; u16 t_reset_to_off; u16 t_off_to_aack; u16 t_off_to_tx_on; u16 t_off_to_sleep; u16 t_sleep_to_off; u16 t_frame; u16 t_p_ack; int rssi_base_val; int (*set_channel)(struct at86rf230_local *, u8, u8); int (*set_txpower)(struct at86rf230_local *, s32); }; #define AT86RF2XX_MAX_BUF (127 + 3) /* tx retries to access the TX_ON state * if it's above then force change will be started. * * We assume the max_frame_retries (7) value of 802.15.4 here. */ #define AT86RF2XX_MAX_TX_RETRIES 7 /* We use the recommended 5 minutes timeout to recalibrate */ #define AT86RF2XX_CAL_LOOP_TIMEOUT (5 * 60 * HZ) struct at86rf230_state_change { struct at86rf230_local *lp; int irq; struct hrtimer timer; struct spi_message msg; struct spi_transfer trx; u8 buf[AT86RF2XX_MAX_BUF]; void (*complete)(void *context); u8 from_state; u8 to_state; int trac; bool free; }; struct at86rf230_local { struct spi_device *spi; struct ieee802154_hw *hw; struct at86rf2xx_chip_data *data; struct regmap *regmap; struct gpio_desc *slp_tr; bool sleep; struct completion state_complete; struct at86rf230_state_change state; unsigned long cal_timeout; bool is_tx; bool is_tx_from_off; bool was_tx; u8 tx_retry; struct sk_buff *tx_skb; struct at86rf230_state_change tx; }; #define AT86RF2XX_NUMREGS 0x3F static void at86rf230_async_state_change(struct at86rf230_local *lp, struct at86rf230_state_change *ctx, const u8 state, void (*complete)(void *context)); static inline void at86rf230_sleep(struct at86rf230_local *lp) { if (lp->slp_tr) { gpiod_set_value(lp->slp_tr, 1); usleep_range(lp->data->t_off_to_sleep, lp->data->t_off_to_sleep + 10); lp->sleep = true; } } static inline void at86rf230_awake(struct at86rf230_local *lp) { if (lp->slp_tr) { gpiod_set_value(lp->slp_tr, 0); usleep_range(lp->data->t_sleep_to_off, lp->data->t_sleep_to_off + 100); lp->sleep = false; } } static inline int __at86rf230_write(struct at86rf230_local *lp, unsigned int addr, unsigned int data) { bool sleep = lp->sleep; int ret; /* awake for register setting if sleep */ if (sleep) at86rf230_awake(lp); ret = regmap_write(lp->regmap, addr, data); /* sleep again if was sleeping */ if (sleep) at86rf230_sleep(lp); return ret; } static inline int __at86rf230_read(struct at86rf230_local *lp, unsigned int addr, unsigned int *data) { bool sleep = lp->sleep; int ret; /* awake for register setting if sleep */ if (sleep) at86rf230_awake(lp); ret = regmap_read(lp->regmap, addr, data); /* sleep again if was sleeping */ if (sleep) at86rf230_sleep(lp); return ret; } static inline int at86rf230_read_subreg(struct at86rf230_local *lp, unsigned int addr, unsigned int mask, unsigned int shift, unsigned int *data) { int rc; rc = __at86rf230_read(lp, addr, data); if 
(!rc) *data = (*data & mask) >> shift; return rc; } static inline int at86rf230_write_subreg(struct at86rf230_local *lp, unsigned int addr, unsigned int mask, unsigned int shift, unsigned int data) { bool sleep = lp->sleep; int ret; /* awake for register setting if sleep */ if (sleep) at86rf230_awake(lp); ret = regmap_update_bits(lp->regmap, addr, mask, data << shift); /* sleep again if was sleeping */ if (sleep) at86rf230_sleep(lp); return ret; } static inline void at86rf230_slp_tr_rising_edge(struct at86rf230_local *lp) { gpiod_set_value(lp->slp_tr, 1); udelay(1); gpiod_set_value(lp->slp_tr, 0); } static bool at86rf230_reg_writeable(struct device *dev, unsigned int reg) { switch (reg) { case RG_TRX_STATE: case RG_TRX_CTRL_0: case RG_TRX_CTRL_1: case RG_PHY_TX_PWR: case RG_PHY_ED_LEVEL: case RG_PHY_CC_CCA: case RG_CCA_THRES: case RG_RX_CTRL: case RG_SFD_VALUE: case RG_TRX_CTRL_2: case RG_ANT_DIV: case RG_IRQ_MASK: case RG_VREG_CTRL: case RG_BATMON: case RG_XOSC_CTRL: case RG_RX_SYN: case RG_XAH_CTRL_1: case RG_FTN_CTRL: case RG_PLL_CF: case RG_PLL_DCU: case RG_SHORT_ADDR_0: case RG_SHORT_ADDR_1: case RG_PAN_ID_0: case RG_PAN_ID_1: case RG_IEEE_ADDR_0: case RG_IEEE_ADDR_1: case RG_IEEE_ADDR_2: case RG_IEEE_ADDR_3: case RG_IEEE_ADDR_4: case RG_IEEE_ADDR_5: case RG_IEEE_ADDR_6: case RG_IEEE_ADDR_7: case RG_XAH_CTRL_0: case RG_CSMA_SEED_0: case RG_CSMA_SEED_1: case RG_CSMA_BE: return true; default: return false; } } static bool at86rf230_reg_readable(struct device *dev, unsigned int reg) { bool rc; /* all writeable are also readable */ rc = at86rf230_reg_writeable(dev, reg); if (rc) return rc; /* readonly regs */ switch (reg) { case RG_TRX_STATUS: case RG_PHY_RSSI: case RG_IRQ_STATUS: case RG_PART_NUM: case RG_VERSION_NUM: case RG_MAN_ID_1: case RG_MAN_ID_0: return true; default: return false; } } static bool at86rf230_reg_volatile(struct device *dev, unsigned int reg) { /* can be changed during runtime */ switch (reg) { case RG_TRX_STATUS: case RG_TRX_STATE: case RG_PHY_RSSI: case RG_PHY_ED_LEVEL: case RG_IRQ_STATUS: case RG_VREG_CTRL: case RG_PLL_CF: case RG_PLL_DCU: return true; default: return false; } } static bool at86rf230_reg_precious(struct device *dev, unsigned int reg) { /* don't clear irq line on read */ switch (reg) { case RG_IRQ_STATUS: return true; default: return false; } } static const struct regmap_config at86rf230_regmap_spi_config = { .reg_bits = 8, .val_bits = 8, .write_flag_mask = CMD_REG | CMD_WRITE, .read_flag_mask = CMD_REG, .cache_type = REGCACHE_RBTREE, .max_register = AT86RF2XX_NUMREGS, .writeable_reg = at86rf230_reg_writeable, .readable_reg = at86rf230_reg_readable, .volatile_reg = at86rf230_reg_volatile, .precious_reg = at86rf230_reg_precious, }; static void at86rf230_async_error_recover_complete(void *context) { struct at86rf230_state_change *ctx = context; struct at86rf230_local *lp = ctx->lp; if (ctx->free) kfree(ctx); if (lp->was_tx) { lp->was_tx = 0; ieee802154_xmit_hw_error(lp->hw, lp->tx_skb); } } static void at86rf230_async_error_recover(void *context) { struct at86rf230_state_change *ctx = context; struct at86rf230_local *lp = ctx->lp; if (lp->is_tx) { lp->was_tx = 1; lp->is_tx = 0; } at86rf230_async_state_change(lp, ctx, STATE_RX_AACK_ON, at86rf230_async_error_recover_complete); } static inline void at86rf230_async_error(struct at86rf230_local *lp, struct at86rf230_state_change *ctx, int rc) { dev_err(&lp->spi->dev, "spi_async error %d\n", rc); at86rf230_async_state_change(lp, ctx, STATE_FORCE_TRX_OFF, at86rf230_async_error_recover); } /* Generic 
function to get some register value in async mode */ static void at86rf230_async_read_reg(struct at86rf230_local *lp, u8 reg, struct at86rf230_state_change *ctx, void (*complete)(void *context)) { int rc; u8 *tx_buf = ctx->buf; tx_buf[0] = (reg & CMD_REG_MASK) | CMD_REG; ctx->msg.complete = complete; rc = spi_async(lp->spi, &ctx->msg); if (rc) at86rf230_async_error(lp, ctx, rc); } static void at86rf230_async_write_reg(struct at86rf230_local *lp, u8 reg, u8 val, struct at86rf230_state_change *ctx, void (*complete)(void *context)) { int rc; ctx->buf[0] = (reg & CMD_REG_MASK) | CMD_REG | CMD_WRITE; ctx->buf[1] = val; ctx->msg.complete = complete; rc = spi_async(lp->spi, &ctx->msg); if (rc) at86rf230_async_error(lp, ctx, rc); } static void at86rf230_async_state_assert(void *context) { struct at86rf230_state_change *ctx = context; struct at86rf230_local *lp = ctx->lp; const u8 *buf = ctx->buf; const u8 trx_state = buf[1] & TRX_STATE_MASK; /* Assert state change */ if (trx_state != ctx->to_state) { /* Special handling if transceiver state is in * STATE_BUSY_RX_AACK and a SHR was detected. */ if (trx_state == STATE_BUSY_RX_AACK) { /* Undocumented race condition. If we send a state * change to STATE_RX_AACK_ON the transceiver could * change his state automatically to STATE_BUSY_RX_AACK * if a SHR was detected. This is not an error, but we * can't assert this. */ if (ctx->to_state == STATE_RX_AACK_ON) goto done; /* If we change to STATE_TX_ON without forcing and * transceiver state is STATE_BUSY_RX_AACK, we wait * 'tFrame + tPAck' receiving time. In this time the * PDU should be received. If the transceiver is still * in STATE_BUSY_RX_AACK, we run a force state change * to STATE_TX_ON. This is a timeout handling, if the * transceiver stucks in STATE_BUSY_RX_AACK. * * Additional we do several retries to try to get into * TX_ON state without forcing. If the retries are * higher or equal than AT86RF2XX_MAX_TX_RETRIES we * will do a force change. */ if (ctx->to_state == STATE_TX_ON || ctx->to_state == STATE_TRX_OFF) { u8 state = ctx->to_state; if (lp->tx_retry >= AT86RF2XX_MAX_TX_RETRIES) state = STATE_FORCE_TRX_OFF; lp->tx_retry++; at86rf230_async_state_change(lp, ctx, state, ctx->complete); return; } } dev_warn(&lp->spi->dev, "unexcept state change from 0x%02x to 0x%02x. Actual state: 0x%02x\n", ctx->from_state, ctx->to_state, trx_state); } done: if (ctx->complete) ctx->complete(context); } static enum hrtimer_restart at86rf230_async_state_timer(struct hrtimer *timer) { struct at86rf230_state_change *ctx = container_of(timer, struct at86rf230_state_change, timer); struct at86rf230_local *lp = ctx->lp; at86rf230_async_read_reg(lp, RG_TRX_STATUS, ctx, at86rf230_async_state_assert); return HRTIMER_NORESTART; } /* Do state change timing delay. */ static void at86rf230_async_state_delay(void *context) { struct at86rf230_state_change *ctx = context; struct at86rf230_local *lp = ctx->lp; struct at86rf2xx_chip_data *c = lp->data; bool force = false; ktime_t tim; /* The force state changes are will show as normal states in the * state status subregister. We change the to_state to the * corresponding one and remember if it was a force change, this * differs if we do a state change from STATE_BUSY_RX_AACK. 
*/ switch (ctx->to_state) { case STATE_FORCE_TX_ON: ctx->to_state = STATE_TX_ON; force = true; break; case STATE_FORCE_TRX_OFF: ctx->to_state = STATE_TRX_OFF; force = true; break; default: break; } switch (ctx->from_state) { case STATE_TRX_OFF: switch (ctx->to_state) { case STATE_RX_AACK_ON: tim = c->t_off_to_aack * NSEC_PER_USEC; /* state change from TRX_OFF to RX_AACK_ON to do a * calibration, we need to reset the timeout for the * next one. */ lp->cal_timeout = jiffies + AT86RF2XX_CAL_LOOP_TIMEOUT; goto change; case STATE_TX_ARET_ON: case STATE_TX_ON: tim = c->t_off_to_tx_on * NSEC_PER_USEC; /* state change from TRX_OFF to TX_ON or ARET_ON to do * a calibration, we need to reset the timeout for the * next one. */ lp->cal_timeout = jiffies + AT86RF2XX_CAL_LOOP_TIMEOUT; goto change; default: break; } break; case STATE_BUSY_RX_AACK: switch (ctx->to_state) { case STATE_TRX_OFF: case STATE_TX_ON: /* Wait for worst case receiving time if we * didn't make a force change from BUSY_RX_AACK * to TX_ON or TRX_OFF. */ if (!force) { tim = (c->t_frame + c->t_p_ack) * NSEC_PER_USEC; goto change; } break; default: break; } break; /* Default value, means RESET state */ case STATE_P_ON: switch (ctx->to_state) { case STATE_TRX_OFF: tim = c->t_reset_to_off * NSEC_PER_USEC; goto change; default: break; } break; default: break; } /* Default delay is 1us in the most cases */ udelay(1); at86rf230_async_state_timer(&ctx->timer); return; change: hrtimer_start(&ctx->timer, tim, HRTIMER_MODE_REL); } static void at86rf230_async_state_change_start(void *context) { struct at86rf230_state_change *ctx = context; struct at86rf230_local *lp = ctx->lp; u8 *buf = ctx->buf; const u8 trx_state = buf[1] & TRX_STATE_MASK; /* Check for "possible" STATE_TRANSITION_IN_PROGRESS */ if (trx_state == STATE_TRANSITION_IN_PROGRESS) { udelay(1); at86rf230_async_read_reg(lp, RG_TRX_STATUS, ctx, at86rf230_async_state_change_start); return; } /* Check if we already are in the state which we change in */ if (trx_state == ctx->to_state) { if (ctx->complete) ctx->complete(context); return; } /* Set current state to the context of state change */ ctx->from_state = trx_state; /* Going into the next step for a state change which do a timing * relevant delay. */ at86rf230_async_write_reg(lp, RG_TRX_STATE, ctx->to_state, ctx, at86rf230_async_state_delay); } static void at86rf230_async_state_change(struct at86rf230_local *lp, struct at86rf230_state_change *ctx, const u8 state, void (*complete)(void *context)) { /* Initialization for the state change context */ ctx->to_state = state; ctx->complete = complete; at86rf230_async_read_reg(lp, RG_TRX_STATUS, ctx, at86rf230_async_state_change_start); } static void at86rf230_sync_state_change_complete(void *context) { struct at86rf230_state_change *ctx = context; struct at86rf230_local *lp = ctx->lp; complete(&lp->state_complete); } /* This function do a sync framework above the async state change. * Some callbacks of the IEEE 802.15.4 driver interface need to be * handled synchronously. 
*/ static int at86rf230_sync_state_change(struct at86rf230_local *lp, unsigned int state) { unsigned long rc; at86rf230_async_state_change(lp, &lp->state, state, at86rf230_sync_state_change_complete); rc = wait_for_completion_timeout(&lp->state_complete, msecs_to_jiffies(100)); if (!rc) { at86rf230_async_error(lp, &lp->state, -ETIMEDOUT); return -ETIMEDOUT; } return 0; } static void at86rf230_tx_complete(void *context) { struct at86rf230_state_change *ctx = context; struct at86rf230_local *lp = ctx->lp; if (ctx->trac == IEEE802154_SUCCESS) ieee802154_xmit_complete(lp->hw, lp->tx_skb, false); else ieee802154_xmit_error(lp->hw, lp->tx_skb, ctx->trac); kfree(ctx); } static void at86rf230_tx_on(void *context) { struct at86rf230_state_change *ctx = context; struct at86rf230_local *lp = ctx->lp; at86rf230_async_state_change(lp, ctx, STATE_RX_AACK_ON, at86rf230_tx_complete); } static void at86rf230_tx_trac_check(void *context) { struct at86rf230_state_change *ctx = context; struct at86rf230_local *lp = ctx->lp; u8 trac = TRAC_MASK(ctx->buf[1]); switch (trac) { case TRAC_SUCCESS: case TRAC_SUCCESS_DATA_PENDING: ctx->trac = IEEE802154_SUCCESS; break; case TRAC_CHANNEL_ACCESS_FAILURE: ctx->trac = IEEE802154_CHANNEL_ACCESS_FAILURE; break; case TRAC_NO_ACK: ctx->trac = IEEE802154_NO_ACK; break; default: ctx->trac = IEEE802154_SYSTEM_ERROR; } at86rf230_async_state_change(lp, ctx, STATE_TX_ON, at86rf230_tx_on); } static void at86rf230_rx_read_frame_complete(void *context) { struct at86rf230_state_change *ctx = context; struct at86rf230_local *lp = ctx->lp; const u8 *buf = ctx->buf; struct sk_buff *skb; u8 len, lqi; len = buf[1]; if (!ieee802154_is_valid_psdu_len(len)) { dev_vdbg(&lp->spi->dev, "corrupted frame received\n"); len = IEEE802154_MTU; } lqi = buf[2 + len]; skb = dev_alloc_skb(IEEE802154_MTU); if (!skb) { dev_vdbg(&lp->spi->dev, "failed to allocate sk_buff\n"); kfree(ctx); return; } skb_put_data(skb, buf + 2, len); ieee802154_rx_irqsafe(lp->hw, skb, lqi); kfree(ctx); } static void at86rf230_rx_trac_check(void *context) { struct at86rf230_state_change *ctx = context; struct at86rf230_local *lp = ctx->lp; u8 *buf = ctx->buf; int rc; buf[0] = CMD_FB; ctx->trx.len = AT86RF2XX_MAX_BUF; ctx->msg.complete = at86rf230_rx_read_frame_complete; rc = spi_async(lp->spi, &ctx->msg); if (rc) { ctx->trx.len = 2; at86rf230_async_error(lp, ctx, rc); } } static void at86rf230_irq_trx_end(void *context) { struct at86rf230_state_change *ctx = context; struct at86rf230_local *lp = ctx->lp; if (lp->is_tx) { lp->is_tx = 0; at86rf230_async_read_reg(lp, RG_TRX_STATE, ctx, at86rf230_tx_trac_check); } else { at86rf230_async_read_reg(lp, RG_TRX_STATE, ctx, at86rf230_rx_trac_check); } } static void at86rf230_irq_status(void *context) { struct at86rf230_state_change *ctx = context; struct at86rf230_local *lp = ctx->lp; const u8 *buf = ctx->buf; u8 irq = buf[1]; enable_irq(lp->spi->irq); if (irq & IRQ_TRX_END) { at86rf230_irq_trx_end(ctx); } else { dev_err(&lp->spi->dev, "not supported irq %02x received\n", irq); kfree(ctx); } } static void at86rf230_setup_spi_messages(struct at86rf230_local *lp, struct at86rf230_state_change *state) { state->lp = lp; state->irq = lp->spi->irq; spi_message_init(&state->msg); state->msg.context = state; state->trx.len = 2; state->trx.tx_buf = state->buf; state->trx.rx_buf = state->buf; spi_message_add_tail(&state->trx, &state->msg); hrtimer_init(&state->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); state->timer.function = at86rf230_async_state_timer; } static irqreturn_t at86rf230_isr(int irq, 
void *data) { struct at86rf230_local *lp = data; struct at86rf230_state_change *ctx; int rc; disable_irq_nosync(irq); ctx = kzalloc(sizeof(*ctx), GFP_ATOMIC); if (!ctx) { enable_irq(irq); return IRQ_NONE; } at86rf230_setup_spi_messages(lp, ctx); /* tell on error handling to free ctx */ ctx->free = true; ctx->buf[0] = (RG_IRQ_STATUS & CMD_REG_MASK) | CMD_REG; ctx->msg.complete = at86rf230_irq_status; rc = spi_async(lp->spi, &ctx->msg); if (rc) { at86rf230_async_error(lp, ctx, rc); enable_irq(irq); return IRQ_NONE; } return IRQ_HANDLED; } static void at86rf230_write_frame_complete(void *context) { struct at86rf230_state_change *ctx = context; struct at86rf230_local *lp = ctx->lp; ctx->trx.len = 2; if (lp->slp_tr) at86rf230_slp_tr_rising_edge(lp); else at86rf230_async_write_reg(lp, RG_TRX_STATE, STATE_BUSY_TX, ctx, NULL); } static void at86rf230_write_frame(void *context) { struct at86rf230_state_change *ctx = context; struct at86rf230_local *lp = ctx->lp; struct sk_buff *skb = lp->tx_skb; u8 *buf = ctx->buf; int rc; lp->is_tx = 1; buf[0] = CMD_FB | CMD_WRITE; buf[1] = skb->len + 2; memcpy(buf + 2, skb->data, skb->len); ctx->trx.len = skb->len + 2; ctx->msg.complete = at86rf230_write_frame_complete; rc = spi_async(lp->spi, &ctx->msg); if (rc) { ctx->trx.len = 2; at86rf230_async_error(lp, ctx, rc); } } static void at86rf230_xmit_tx_on(void *context) { struct at86rf230_state_change *ctx = context; struct at86rf230_local *lp = ctx->lp; at86rf230_async_state_change(lp, ctx, STATE_TX_ARET_ON, at86rf230_write_frame); } static void at86rf230_xmit_start(void *context) { struct at86rf230_state_change *ctx = context; struct at86rf230_local *lp = ctx->lp; /* check if we change from off state */ if (lp->is_tx_from_off) at86rf230_async_state_change(lp, ctx, STATE_TX_ARET_ON, at86rf230_write_frame); else at86rf230_async_state_change(lp, ctx, STATE_TX_ON, at86rf230_xmit_tx_on); } static int at86rf230_xmit(struct ieee802154_hw *hw, struct sk_buff *skb) { struct at86rf230_local *lp = hw->priv; struct at86rf230_state_change *ctx = &lp->tx; lp->tx_skb = skb; lp->tx_retry = 0; /* After 5 minutes in PLL and the same frequency we run again the * calibration loops which is recommended by at86rf2xx datasheets. * * The calibration is initiate by a state change from TRX_OFF * to TX_ON, the lp->cal_timeout should be reinit by state_delay * function then to start in the next 5 minutes. */ if (time_is_before_jiffies(lp->cal_timeout)) { lp->is_tx_from_off = true; at86rf230_async_state_change(lp, ctx, STATE_TRX_OFF, at86rf230_xmit_start); } else { lp->is_tx_from_off = false; at86rf230_xmit_start(ctx); } return 0; } static int at86rf230_ed(struct ieee802154_hw *hw, u8 *level) { WARN_ON(!level); *level = 0xbe; return 0; } static int at86rf230_start(struct ieee802154_hw *hw) { struct at86rf230_local *lp = hw->priv; at86rf230_awake(lp); enable_irq(lp->spi->irq); return at86rf230_sync_state_change(lp, STATE_RX_AACK_ON); } static void at86rf230_stop(struct ieee802154_hw *hw) { struct at86rf230_local *lp = hw->priv; u8 csma_seed[2]; at86rf230_sync_state_change(lp, STATE_FORCE_TRX_OFF); disable_irq(lp->spi->irq); /* It's recommended to set random new csma_seeds before sleep state. * Makes only sense in the stop callback, not doing this inside of * at86rf230_sleep, this is also used when we don't transmit afterwards * when calling start callback again. 
*/ get_random_bytes(csma_seed, ARRAY_SIZE(csma_seed)); at86rf230_write_subreg(lp, SR_CSMA_SEED_0, csma_seed[0]); at86rf230_write_subreg(lp, SR_CSMA_SEED_1, csma_seed[1]); at86rf230_sleep(lp); } static int at86rf23x_set_channel(struct at86rf230_local *lp, u8 page, u8 channel) { return at86rf230_write_subreg(lp, SR_CHANNEL, channel); } #define AT86RF2XX_MAX_ED_LEVELS 0xF static const s32 at86rf233_ed_levels[AT86RF2XX_MAX_ED_LEVELS + 1] = { -9400, -9200, -9000, -8800, -8600, -8400, -8200, -8000, -7800, -7600, -7400, -7200, -7000, -6800, -6600, -6400, }; static const s32 at86rf231_ed_levels[AT86RF2XX_MAX_ED_LEVELS + 1] = { -9100, -8900, -8700, -8500, -8300, -8100, -7900, -7700, -7500, -7300, -7100, -6900, -6700, -6500, -6300, -6100, }; static const s32 at86rf212_ed_levels_100[AT86RF2XX_MAX_ED_LEVELS + 1] = { -10000, -9800, -9600, -9400, -9200, -9000, -8800, -8600, -8400, -8200, -8000, -7800, -7600, -7400, -7200, -7000, }; static const s32 at86rf212_ed_levels_98[AT86RF2XX_MAX_ED_LEVELS + 1] = { -9800, -9600, -9400, -9200, -9000, -8800, -8600, -8400, -8200, -8000, -7800, -7600, -7400, -7200, -7000, -6800, }; static inline int at86rf212_update_cca_ed_level(struct at86rf230_local *lp, int rssi_base_val) { unsigned int cca_ed_thres; int rc; rc = at86rf230_read_subreg(lp, SR_CCA_ED_THRES, &cca_ed_thres); if (rc < 0) return rc; switch (rssi_base_val) { case -98: lp->hw->phy->supported.cca_ed_levels = at86rf212_ed_levels_98; lp->hw->phy->supported.cca_ed_levels_size = ARRAY_SIZE(at86rf212_ed_levels_98); lp->hw->phy->cca_ed_level = at86rf212_ed_levels_98[cca_ed_thres]; break; case -100: lp->hw->phy->supported.cca_ed_levels = at86rf212_ed_levels_100; lp->hw->phy->supported.cca_ed_levels_size = ARRAY_SIZE(at86rf212_ed_levels_100); lp->hw->phy->cca_ed_level = at86rf212_ed_levels_100[cca_ed_thres]; break; default: WARN_ON(1); } return 0; } static int at86rf212_set_channel(struct at86rf230_local *lp, u8 page, u8 channel) { int rc; if (channel == 0) rc = at86rf230_write_subreg(lp, SR_SUB_MODE, 0); else rc = at86rf230_write_subreg(lp, SR_SUB_MODE, 1); if (rc < 0) return rc; if (page == 0) { rc = at86rf230_write_subreg(lp, SR_BPSK_QPSK, 0); lp->data->rssi_base_val = -100; } else { rc = at86rf230_write_subreg(lp, SR_BPSK_QPSK, 1); lp->data->rssi_base_val = -98; } if (rc < 0) return rc; rc = at86rf212_update_cca_ed_level(lp, lp->data->rssi_base_val); if (rc < 0) return rc; return at86rf230_write_subreg(lp, SR_CHANNEL, channel); } static int at86rf230_channel(struct ieee802154_hw *hw, u8 page, u8 channel) { struct at86rf230_local *lp = hw->priv; int rc; rc = lp->data->set_channel(lp, page, channel); /* Wait for PLL */ usleep_range(lp->data->t_channel_switch, lp->data->t_channel_switch + 10); lp->cal_timeout = jiffies + AT86RF2XX_CAL_LOOP_TIMEOUT; return rc; } static int at86rf230_set_hw_addr_filt(struct ieee802154_hw *hw, struct ieee802154_hw_addr_filt *filt, unsigned long changed) { struct at86rf230_local *lp = hw->priv; if (changed & IEEE802154_AFILT_SADDR_CHANGED) { u16 addr = le16_to_cpu(filt->short_addr); dev_vdbg(&lp->spi->dev, "%s called for saddr\n", __func__); __at86rf230_write(lp, RG_SHORT_ADDR_0, addr); __at86rf230_write(lp, RG_SHORT_ADDR_1, addr >> 8); } if (changed & IEEE802154_AFILT_PANID_CHANGED) { u16 pan = le16_to_cpu(filt->pan_id); dev_vdbg(&lp->spi->dev, "%s called for pan id\n", __func__); __at86rf230_write(lp, RG_PAN_ID_0, pan); __at86rf230_write(lp, RG_PAN_ID_1, pan >> 8); } if (changed & IEEE802154_AFILT_IEEEADDR_CHANGED) { u8 i, addr[8]; memcpy(addr, &filt->ieee_addr, 8); 
dev_vdbg(&lp->spi->dev, "%s called for IEEE addr\n", __func__); for (i = 0; i < 8; i++) __at86rf230_write(lp, RG_IEEE_ADDR_0 + i, addr[i]); } if (changed & IEEE802154_AFILT_PANC_CHANGED) { dev_vdbg(&lp->spi->dev, "%s called for panc change\n", __func__); if (filt->pan_coord) at86rf230_write_subreg(lp, SR_AACK_I_AM_COORD, 1); else at86rf230_write_subreg(lp, SR_AACK_I_AM_COORD, 0); } return 0; } #define AT86RF23X_MAX_TX_POWERS 0xF static const s32 at86rf233_powers[AT86RF23X_MAX_TX_POWERS + 1] = { 400, 370, 340, 300, 250, 200, 100, 0, -100, -200, -300, -400, -600, -800, -1200, -1700, }; static const s32 at86rf231_powers[AT86RF23X_MAX_TX_POWERS + 1] = { 300, 280, 230, 180, 130, 70, 0, -100, -200, -300, -400, -500, -700, -900, -1200, -1700, }; #define AT86RF212_MAX_TX_POWERS 0x1F static const s32 at86rf212_powers[AT86RF212_MAX_TX_POWERS + 1] = { 500, 400, 300, 200, 100, 0, -100, -200, -300, -400, -500, -600, -700, -800, -900, -1000, -1100, -1200, -1300, -1400, -1500, -1600, -1700, -1800, -1900, -2000, -2100, -2200, -2300, -2400, -2500, -2600, }; static int at86rf23x_set_txpower(struct at86rf230_local *lp, s32 mbm) { u32 i; for (i = 0; i < lp->hw->phy->supported.tx_powers_size; i++) { if (lp->hw->phy->supported.tx_powers[i] == mbm) return at86rf230_write_subreg(lp, SR_TX_PWR_23X, i); } return -EINVAL; } static int at86rf212_set_txpower(struct at86rf230_local *lp, s32 mbm) { u32 i; for (i = 0; i < lp->hw->phy->supported.tx_powers_size; i++) { if (lp->hw->phy->supported.tx_powers[i] == mbm) return at86rf230_write_subreg(lp, SR_TX_PWR_212, i); } return -EINVAL; } static int at86rf230_set_txpower(struct ieee802154_hw *hw, s32 mbm) { struct at86rf230_local *lp = hw->priv; return lp->data->set_txpower(lp, mbm); } static int at86rf230_set_lbt(struct ieee802154_hw *hw, bool on) { struct at86rf230_local *lp = hw->priv; return at86rf230_write_subreg(lp, SR_CSMA_LBT_MODE, on); } static int at86rf230_set_cca_mode(struct ieee802154_hw *hw, const struct wpan_phy_cca *cca) { struct at86rf230_local *lp = hw->priv; u8 val; /* mapping 802.15.4 to driver spec */ switch (cca->mode) { case NL802154_CCA_ENERGY: val = 1; break; case NL802154_CCA_CARRIER: val = 2; break; case NL802154_CCA_ENERGY_CARRIER: switch (cca->opt) { case NL802154_CCA_OPT_ENERGY_CARRIER_AND: val = 3; break; case NL802154_CCA_OPT_ENERGY_CARRIER_OR: val = 0; break; default: return -EINVAL; } break; default: return -EINVAL; } return at86rf230_write_subreg(lp, SR_CCA_MODE, val); } static int at86rf230_set_cca_ed_level(struct ieee802154_hw *hw, s32 mbm) { struct at86rf230_local *lp = hw->priv; u32 i; for (i = 0; i < hw->phy->supported.cca_ed_levels_size; i++) { if (hw->phy->supported.cca_ed_levels[i] == mbm) return at86rf230_write_subreg(lp, SR_CCA_ED_THRES, i); } return -EINVAL; } static int at86rf230_set_csma_params(struct ieee802154_hw *hw, u8 min_be, u8 max_be, u8 retries) { struct at86rf230_local *lp = hw->priv; int rc; rc = at86rf230_write_subreg(lp, SR_MIN_BE, min_be); if (rc) return rc; rc = at86rf230_write_subreg(lp, SR_MAX_BE, max_be); if (rc) return rc; return at86rf230_write_subreg(lp, SR_MAX_CSMA_RETRIES, retries); } static int at86rf230_set_frame_retries(struct ieee802154_hw *hw, s8 retries) { struct at86rf230_local *lp = hw->priv; return at86rf230_write_subreg(lp, SR_MAX_FRAME_RETRIES, retries); } static int at86rf230_set_promiscuous_mode(struct ieee802154_hw *hw, const bool on) { struct at86rf230_local *lp = hw->priv; int rc; if (on) { rc = at86rf230_write_subreg(lp, SR_AACK_DIS_ACK, 1); if (rc < 0) return rc; rc = 
at86rf230_write_subreg(lp, SR_AACK_PROM_MODE, 1); if (rc < 0) return rc; } else { rc = at86rf230_write_subreg(lp, SR_AACK_PROM_MODE, 0); if (rc < 0) return rc; rc = at86rf230_write_subreg(lp, SR_AACK_DIS_ACK, 0); if (rc < 0) return rc; } return 0; } static const struct ieee802154_ops at86rf230_ops = { .owner = THIS_MODULE, .xmit_async = at86rf230_xmit, .ed = at86rf230_ed, .set_channel = at86rf230_channel, .start = at86rf230_start, .stop = at86rf230_stop, .set_hw_addr_filt = at86rf230_set_hw_addr_filt, .set_txpower = at86rf230_set_txpower, .set_lbt = at86rf230_set_lbt, .set_cca_mode = at86rf230_set_cca_mode, .set_cca_ed_level = at86rf230_set_cca_ed_level, .set_csma_params = at86rf230_set_csma_params, .set_frame_retries = at86rf230_set_frame_retries, .set_promiscuous_mode = at86rf230_set_promiscuous_mode, }; static struct at86rf2xx_chip_data at86rf233_data = { .t_sleep_cycle = 330, .t_channel_switch = 11, .t_reset_to_off = 26, .t_off_to_aack = 80, .t_off_to_tx_on = 80, .t_off_to_sleep = 35, .t_sleep_to_off = 1000, .t_frame = 4096, .t_p_ack = 545, .rssi_base_val = -94, .set_channel = at86rf23x_set_channel, .set_txpower = at86rf23x_set_txpower, }; static struct at86rf2xx_chip_data at86rf231_data = { .t_sleep_cycle = 330, .t_channel_switch = 24, .t_reset_to_off = 37, .t_off_to_aack = 110, .t_off_to_tx_on = 110, .t_off_to_sleep = 35, .t_sleep_to_off = 1000, .t_frame = 4096, .t_p_ack = 545, .rssi_base_val = -91, .set_channel = at86rf23x_set_channel, .set_txpower = at86rf23x_set_txpower, }; static struct at86rf2xx_chip_data at86rf212_data = { .t_sleep_cycle = 330, .t_channel_switch = 11, .t_reset_to_off = 26, .t_off_to_aack = 200, .t_off_to_tx_on = 200, .t_off_to_sleep = 35, .t_sleep_to_off = 1000, .t_frame = 4096, .t_p_ack = 545, .rssi_base_val = -100, .set_channel = at86rf212_set_channel, .set_txpower = at86rf212_set_txpower, }; static int at86rf230_hw_init(struct at86rf230_local *lp, u8 xtal_trim) { int rc, irq_type, irq_pol = IRQ_ACTIVE_HIGH; unsigned int dvdd; u8 csma_seed[2]; rc = at86rf230_sync_state_change(lp, STATE_FORCE_TRX_OFF); if (rc) return rc; irq_type = irq_get_trigger_type(lp->spi->irq); if (irq_type == IRQ_TYPE_EDGE_FALLING || irq_type == IRQ_TYPE_LEVEL_LOW) irq_pol = IRQ_ACTIVE_LOW; rc = at86rf230_write_subreg(lp, SR_IRQ_POLARITY, irq_pol); if (rc) return rc; rc = at86rf230_write_subreg(lp, SR_RX_SAFE_MODE, 1); if (rc) return rc; rc = at86rf230_write_subreg(lp, SR_IRQ_MASK, IRQ_TRX_END); if (rc) return rc; /* reset values differs in at86rf231 and at86rf233 */ rc = at86rf230_write_subreg(lp, SR_IRQ_MASK_MODE, 0); if (rc) return rc; get_random_bytes(csma_seed, ARRAY_SIZE(csma_seed)); rc = at86rf230_write_subreg(lp, SR_CSMA_SEED_0, csma_seed[0]); if (rc) return rc; rc = at86rf230_write_subreg(lp, SR_CSMA_SEED_1, csma_seed[1]); if (rc) return rc; /* CLKM changes are applied immediately */ rc = at86rf230_write_subreg(lp, SR_CLKM_SHA_SEL, 0x00); if (rc) return rc; /* Turn CLKM Off */ rc = at86rf230_write_subreg(lp, SR_CLKM_CTRL, 0x00); if (rc) return rc; /* Wait the next SLEEP cycle */ usleep_range(lp->data->t_sleep_cycle, lp->data->t_sleep_cycle + 100); /* xtal_trim value is calculated by: * CL = 0.5 * (CX + CTRIM + CPAR) * * whereas: * CL = capacitor of used crystal * CX = connected capacitors at xtal pins * CPAR = in all at86rf2xx datasheets this is a constant value 3 pF, * but this is different on each board setup. You need to fine * tuning this value via CTRIM. * CTRIM = variable capacitor setting. Resolution is 0.3 pF range is * 0 pF upto 4.5 pF. 
* * Examples: * atben transceiver: * * CL = 8 pF * CX = 12 pF * CPAR = 3 pF (We assume the magic constant from datasheet) * CTRIM = 0.9 pF * * (12+0.9+3)/2 = 7.95 which is nearly at 8 pF * * xtal_trim = 0x3 * * openlabs transceiver: * * CL = 16 pF * CX = 22 pF * CPAR = 3 pF (We assume the magic constant from datasheet) * CTRIM = 4.5 pF * * (22+4.5+3)/2 = 14.75 which is the nearest value to 16 pF * * xtal_trim = 0xf */ rc = at86rf230_write_subreg(lp, SR_XTAL_TRIM, xtal_trim); if (rc) return rc; rc = at86rf230_read_subreg(lp, SR_DVDD_OK, &dvdd); if (rc) return rc; if (!dvdd) { dev_err(&lp->spi->dev, "DVDD error\n"); return -EINVAL; } /* Force setting slotted operation bit to 0. Sometimes the atben * sets this bit and I don't know why. We set this always force * to zero while probing. */ return at86rf230_write_subreg(lp, SR_SLOTTED_OPERATION, 0); } static int at86rf230_detect_device(struct at86rf230_local *lp) { unsigned int part, version, val; u16 man_id = 0; const char *chip; int rc; rc = __at86rf230_read(lp, RG_MAN_ID_0, &val); if (rc) return rc; man_id |= val; rc = __at86rf230_read(lp, RG_MAN_ID_1, &val); if (rc) return rc; man_id |= (val << 8); rc = __at86rf230_read(lp, RG_PART_NUM, &part); if (rc) return rc; rc = __at86rf230_read(lp, RG_VERSION_NUM, &version); if (rc) return rc; if (man_id != 0x001f) { dev_err(&lp->spi->dev, "Non-Atmel dev found (MAN_ID %02x %02x)\n", man_id >> 8, man_id & 0xFF); return -EINVAL; } lp->hw->flags = IEEE802154_HW_TX_OMIT_CKSUM | IEEE802154_HW_CSMA_PARAMS | IEEE802154_HW_FRAME_RETRIES | IEEE802154_HW_AFILT | IEEE802154_HW_PROMISCUOUS; lp->hw->phy->flags = WPAN_PHY_FLAG_TXPOWER | WPAN_PHY_FLAG_CCA_ED_LEVEL | WPAN_PHY_FLAG_CCA_MODE; lp->hw->phy->supported.cca_modes = BIT(NL802154_CCA_ENERGY) | BIT(NL802154_CCA_CARRIER) | BIT(NL802154_CCA_ENERGY_CARRIER); lp->hw->phy->supported.cca_opts = BIT(NL802154_CCA_OPT_ENERGY_CARRIER_AND) | BIT(NL802154_CCA_OPT_ENERGY_CARRIER_OR); lp->hw->phy->cca.mode = NL802154_CCA_ENERGY; switch (part) { case 2: chip = "at86rf230"; rc = -ENOTSUPP; goto not_supp; case 3: chip = "at86rf231"; lp->data = &at86rf231_data; lp->hw->phy->supported.channels[0] = 0x7FFF800; lp->hw->phy->current_channel = 11; lp->hw->phy->supported.tx_powers = at86rf231_powers; lp->hw->phy->supported.tx_powers_size = ARRAY_SIZE(at86rf231_powers); lp->hw->phy->supported.cca_ed_levels = at86rf231_ed_levels; lp->hw->phy->supported.cca_ed_levels_size = ARRAY_SIZE(at86rf231_ed_levels); break; case 7: chip = "at86rf212"; lp->data = &at86rf212_data; lp->hw->flags |= IEEE802154_HW_LBT; lp->hw->phy->supported.channels[0] = 0x00007FF; lp->hw->phy->supported.channels[2] = 0x00007FF; lp->hw->phy->current_channel = 5; lp->hw->phy->supported.lbt = NL802154_SUPPORTED_BOOL_BOTH; lp->hw->phy->supported.tx_powers = at86rf212_powers; lp->hw->phy->supported.tx_powers_size = ARRAY_SIZE(at86rf212_powers); lp->hw->phy->supported.cca_ed_levels = at86rf212_ed_levels_100; lp->hw->phy->supported.cca_ed_levels_size = ARRAY_SIZE(at86rf212_ed_levels_100); break; case 11: chip = "at86rf233"; lp->data = &at86rf233_data; lp->hw->phy->supported.channels[0] = 0x7FFF800; lp->hw->phy->current_channel = 13; lp->hw->phy->supported.tx_powers = at86rf233_powers; lp->hw->phy->supported.tx_powers_size = ARRAY_SIZE(at86rf233_powers); lp->hw->phy->supported.cca_ed_levels = at86rf233_ed_levels; lp->hw->phy->supported.cca_ed_levels_size = ARRAY_SIZE(at86rf233_ed_levels); break; default: chip = "unknown"; rc = -ENOTSUPP; goto not_supp; } lp->hw->phy->cca_ed_level = lp->hw->phy->supported.cca_ed_levels[7]; 
lp->hw->phy->transmit_power = lp->hw->phy->supported.tx_powers[0]; not_supp: dev_info(&lp->spi->dev, "Detected %s chip version %d\n", chip, version); return rc; } static int at86rf230_probe(struct spi_device *spi) { struct ieee802154_hw *hw; struct at86rf230_local *lp; struct gpio_desc *slp_tr; struct gpio_desc *rstn; unsigned int status; int rc, irq_type; u8 xtal_trim; if (!spi->irq) { dev_err(&spi->dev, "no IRQ specified\n"); return -EINVAL; } rc = device_property_read_u8(&spi->dev, "xtal-trim", &xtal_trim); if (rc < 0) { if (rc != -EINVAL) { dev_err(&spi->dev, "failed to parse xtal-trim: %d\n", rc); return rc; } xtal_trim = 0; } rstn = devm_gpiod_get_optional(&spi->dev, "reset", GPIOD_OUT_LOW); rc = PTR_ERR_OR_ZERO(rstn); if (rc) return rc; gpiod_set_consumer_name(rstn, "rstn"); slp_tr = devm_gpiod_get_optional(&spi->dev, "sleep", GPIOD_OUT_LOW); rc = PTR_ERR_OR_ZERO(slp_tr); if (rc) return rc; gpiod_set_consumer_name(slp_tr, "slp_tr"); /* Reset */ if (rstn) { udelay(1); gpiod_set_value_cansleep(rstn, 1); udelay(1); gpiod_set_value_cansleep(rstn, 0); usleep_range(120, 240); } hw = ieee802154_alloc_hw(sizeof(*lp), &at86rf230_ops); if (!hw) return -ENOMEM; lp = hw->priv; lp->hw = hw; lp->spi = spi; lp->slp_tr = slp_tr; hw->parent = &spi->dev; ieee802154_random_extended_addr(&hw->phy->perm_extended_addr); lp->regmap = devm_regmap_init_spi(spi, &at86rf230_regmap_spi_config); if (IS_ERR(lp->regmap)) { rc = PTR_ERR(lp->regmap); dev_err(&spi->dev, "Failed to allocate register map: %d\n", rc); goto free_dev; } at86rf230_setup_spi_messages(lp, &lp->state); at86rf230_setup_spi_messages(lp, &lp->tx); rc = at86rf230_detect_device(lp); if (rc < 0) goto free_dev; init_completion(&lp->state_complete); spi_set_drvdata(spi, lp); rc = at86rf230_hw_init(lp, xtal_trim); if (rc) goto free_dev; /* Read irq status register to reset irq line */ rc = at86rf230_read_subreg(lp, RG_IRQ_STATUS, 0xff, 0, &status); if (rc) goto free_dev; irq_type = irq_get_trigger_type(spi->irq); if (!irq_type) irq_type = IRQF_TRIGGER_HIGH; rc = devm_request_irq(&spi->dev, spi->irq, at86rf230_isr, IRQF_SHARED | irq_type, dev_name(&spi->dev), lp); if (rc) goto free_dev; /* disable_irq by default and wait for starting hardware */ disable_irq(spi->irq); /* going into sleep by default */ at86rf230_sleep(lp); rc = ieee802154_register_hw(lp->hw); if (rc) goto free_dev; return rc; free_dev: ieee802154_free_hw(lp->hw); return rc; } static void at86rf230_remove(struct spi_device *spi) { struct at86rf230_local *lp = spi_get_drvdata(spi); /* mask all at86rf230 irq's */ at86rf230_write_subreg(lp, SR_IRQ_MASK, 0); ieee802154_unregister_hw(lp->hw); ieee802154_free_hw(lp->hw); dev_dbg(&spi->dev, "unregistered at86rf230\n"); } static const struct of_device_id at86rf230_of_match[] = { { .compatible = "atmel,at86rf230", }, { .compatible = "atmel,at86rf231", }, { .compatible = "atmel,at86rf233", }, { .compatible = "atmel,at86rf212", }, { }, }; MODULE_DEVICE_TABLE(of, at86rf230_of_match); static const struct spi_device_id at86rf230_device_id[] = { { .name = "at86rf230", }, { .name = "at86rf231", }, { .name = "at86rf233", }, { .name = "at86rf212", }, { }, }; MODULE_DEVICE_TABLE(spi, at86rf230_device_id); static struct spi_driver at86rf230_driver = { .id_table = at86rf230_device_id, .driver = { .of_match_table = at86rf230_of_match, .name = "at86rf230", }, .probe = at86rf230_probe, .remove = at86rf230_remove, }; module_spi_driver(at86rf230_driver); MODULE_DESCRIPTION("AT86RF230 Transceiver Driver"); MODULE_LICENSE("GPL v2");
linux-master
drivers/net/ieee802154/at86rf230.c
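/*
 * Illustrative sketch only (not part of the kernel sources above): how the
 * xtal-trim value documented in at86rf230_hw_init() could be derived for a
 * given board. All names below are hypothetical helpers; the driver itself
 * just consumes the precomputed value from the "xtal-trim" device property.
 * Capacitances are kept in tenths of a pF to avoid floating point, mirroring
 * the datasheet formula CL = 0.5 * (CX + CTRIM + CPAR).
 */
#include <stdio.h>

#define CPAR_TENTH_PF	30	/* datasheet constant: 3.0 pF */
#define CTRIM_STEP	3	/* register resolution: 0.3 pF */
#define CTRIM_MAX	15	/* SR_XTAL_TRIM is a 4-bit field */

/* Return the SR_XTAL_TRIM register value for crystal load CL and pin caps CX. */
static unsigned int xtal_trim_for_board(int cl_tenth_pf, int cx_tenth_pf)
{
	int ctrim_tenth_pf = 2 * cl_tenth_pf - cx_tenth_pf - CPAR_TENTH_PF;
	int step;

	if (ctrim_tenth_pf < 0)
		ctrim_tenth_pf = 0;

	/* round to the nearest 0.3 pF step and clamp to the 4-bit range */
	step = (ctrim_tenth_pf + CTRIM_STEP / 2) / CTRIM_STEP;
	return step > CTRIM_MAX ? CTRIM_MAX : step;
}

int main(void)
{
	/* atben: CL = 8 pF, CX = 12 pF -> 0x3, matching the driver comment */
	printf("atben:    0x%x\n", xtal_trim_for_board(80, 120));
	/* openlabs: CL = 16 pF, CX = 22 pF -> clamped to 0xf */
	printf("openlabs: 0x%x\n", xtal_trim_for_board(160, 220));
	return 0;
}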
// SPDX-License-Identifier: GPL-2.0-or-later /* Driver for TI CC2520 802.15.4 Wireless-PAN Networking controller * * Copyright (C) 2014 Varka Bhadram <[email protected]> * Md.Jamal Mohiuddin <[email protected]> * P Sowjanya <[email protected]> */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/gpio/consumer.h> #include <linux/delay.h> #include <linux/spi/spi.h> #include <linux/property.h> #include <linux/workqueue.h> #include <linux/interrupt.h> #include <linux/skbuff.h> #include <linux/ieee802154.h> #include <linux/crc-ccitt.h> #include <asm/unaligned.h> #include <net/mac802154.h> #include <net/cfg802154.h> #define SPI_COMMAND_BUFFER 3 #define HIGH 1 #define LOW 0 #define STATE_IDLE 0 #define RSSI_VALID 0 #define RSSI_OFFSET 78 #define CC2520_RAM_SIZE 640 #define CC2520_FIFO_SIZE 128 #define CC2520RAM_TXFIFO 0x100 #define CC2520RAM_RXFIFO 0x180 #define CC2520RAM_IEEEADDR 0x3EA #define CC2520RAM_PANID 0x3F2 #define CC2520RAM_SHORTADDR 0x3F4 #define CC2520_FREG_MASK 0x3F /* status byte values */ #define CC2520_STATUS_XOSC32M_STABLE BIT(7) #define CC2520_STATUS_RSSI_VALID BIT(6) #define CC2520_STATUS_TX_UNDERFLOW BIT(3) /* IEEE-802.15.4 defined constants (2.4 GHz logical channels) */ #define CC2520_MINCHANNEL 11 #define CC2520_MAXCHANNEL 26 #define CC2520_CHANNEL_SPACING 5 /* command strobes */ #define CC2520_CMD_SNOP 0x00 #define CC2520_CMD_IBUFLD 0x02 #define CC2520_CMD_SIBUFEX 0x03 #define CC2520_CMD_SSAMPLECCA 0x04 #define CC2520_CMD_SRES 0x0f #define CC2520_CMD_MEMORY_MASK 0x0f #define CC2520_CMD_MEMORY_READ 0x10 #define CC2520_CMD_MEMORY_WRITE 0x20 #define CC2520_CMD_RXBUF 0x30 #define CC2520_CMD_RXBUFCP 0x38 #define CC2520_CMD_RXBUFMOV 0x32 #define CC2520_CMD_TXBUF 0x3A #define CC2520_CMD_TXBUFCP 0x3E #define CC2520_CMD_RANDOM 0x3C #define CC2520_CMD_SXOSCON 0x40 #define CC2520_CMD_STXCAL 0x41 #define CC2520_CMD_SRXON 0x42 #define CC2520_CMD_STXON 0x43 #define CC2520_CMD_STXONCCA 0x44 #define CC2520_CMD_SRFOFF 0x45 #define CC2520_CMD_SXOSCOFF 0x46 #define CC2520_CMD_SFLUSHRX 0x47 #define CC2520_CMD_SFLUSHTX 0x48 #define CC2520_CMD_SACK 0x49 #define CC2520_CMD_SACKPEND 0x4A #define CC2520_CMD_SNACK 0x4B #define CC2520_CMD_SRXMASKBITSET 0x4C #define CC2520_CMD_SRXMASKBITCLR 0x4D #define CC2520_CMD_RXMASKAND 0x4E #define CC2520_CMD_RXMASKOR 0x4F #define CC2520_CMD_MEMCP 0x50 #define CC2520_CMD_MEMCPR 0x52 #define CC2520_CMD_MEMXCP 0x54 #define CC2520_CMD_MEMXWR 0x56 #define CC2520_CMD_BCLR 0x58 #define CC2520_CMD_BSET 0x59 #define CC2520_CMD_CTR_UCTR 0x60 #define CC2520_CMD_CBCMAC 0x64 #define CC2520_CMD_UCBCMAC 0x66 #define CC2520_CMD_CCM 0x68 #define CC2520_CMD_UCCM 0x6A #define CC2520_CMD_ECB 0x70 #define CC2520_CMD_ECBO 0x72 #define CC2520_CMD_ECBX 0x74 #define CC2520_CMD_INC 0x78 #define CC2520_CMD_ABORT 0x7F #define CC2520_CMD_REGISTER_READ 0x80 #define CC2520_CMD_REGISTER_WRITE 0xC0 /* status registers */ #define CC2520_CHIPID 0x40 #define CC2520_VERSION 0x42 #define CC2520_EXTCLOCK 0x44 #define CC2520_MDMCTRL0 0x46 #define CC2520_MDMCTRL1 0x47 #define CC2520_FREQEST 0x48 #define CC2520_RXCTRL 0x4A #define CC2520_FSCTRL 0x4C #define CC2520_FSCAL0 0x4E #define CC2520_FSCAL1 0x4F #define CC2520_FSCAL2 0x50 #define CC2520_FSCAL3 0x51 #define CC2520_AGCCTRL0 0x52 #define CC2520_AGCCTRL1 0x53 #define CC2520_AGCCTRL2 0x54 #define CC2520_AGCCTRL3 0x55 #define CC2520_ADCTEST0 0x56 #define CC2520_ADCTEST1 0x57 #define CC2520_ADCTEST2 0x58 #define CC2520_MDMTEST0 0x5A #define CC2520_MDMTEST1 0x5B #define CC2520_DACTEST0 0x5C #define CC2520_DACTEST1 0x5D #define 
CC2520_ATEST 0x5E #define CC2520_DACTEST2 0x5F #define CC2520_PTEST0 0x60 #define CC2520_PTEST1 0x61 #define CC2520_RESERVED 0x62 #define CC2520_DPUBIST 0x7A #define CC2520_ACTBIST 0x7C #define CC2520_RAMBIST 0x7E /* frame registers */ #define CC2520_FRMFILT0 0x00 #define CC2520_FRMFILT1 0x01 #define CC2520_SRCMATCH 0x02 #define CC2520_SRCSHORTEN0 0x04 #define CC2520_SRCSHORTEN1 0x05 #define CC2520_SRCSHORTEN2 0x06 #define CC2520_SRCEXTEN0 0x08 #define CC2520_SRCEXTEN1 0x09 #define CC2520_SRCEXTEN2 0x0A #define CC2520_FRMCTRL0 0x0C #define CC2520_FRMCTRL1 0x0D #define CC2520_RXENABLE0 0x0E #define CC2520_RXENABLE1 0x0F #define CC2520_EXCFLAG0 0x10 #define CC2520_EXCFLAG1 0x11 #define CC2520_EXCFLAG2 0x12 #define CC2520_EXCMASKA0 0x14 #define CC2520_EXCMASKA1 0x15 #define CC2520_EXCMASKA2 0x16 #define CC2520_EXCMASKB0 0x18 #define CC2520_EXCMASKB1 0x19 #define CC2520_EXCMASKB2 0x1A #define CC2520_EXCBINDX0 0x1C #define CC2520_EXCBINDX1 0x1D #define CC2520_EXCBINDY0 0x1E #define CC2520_EXCBINDY1 0x1F #define CC2520_GPIOCTRL0 0x20 #define CC2520_GPIOCTRL1 0x21 #define CC2520_GPIOCTRL2 0x22 #define CC2520_GPIOCTRL3 0x23 #define CC2520_GPIOCTRL4 0x24 #define CC2520_GPIOCTRL5 0x25 #define CC2520_GPIOPOLARITY 0x26 #define CC2520_GPIOCTRL 0x28 #define CC2520_DPUCON 0x2A #define CC2520_DPUSTAT 0x2C #define CC2520_FREQCTRL 0x2E #define CC2520_FREQTUNE 0x2F #define CC2520_TXPOWER 0x30 #define CC2520_TXCTRL 0x31 #define CC2520_FSMSTAT0 0x32 #define CC2520_FSMSTAT1 0x33 #define CC2520_FIFOPCTRL 0x34 #define CC2520_FSMCTRL 0x35 #define CC2520_CCACTRL0 0x36 #define CC2520_CCACTRL1 0x37 #define CC2520_RSSI 0x38 #define CC2520_RSSISTAT 0x39 #define CC2520_RXFIRST 0x3C #define CC2520_RXFIFOCNT 0x3E #define CC2520_TXFIFOCNT 0x3F /* CC2520_FRMFILT0 */ #define FRMFILT0_FRAME_FILTER_EN BIT(0) #define FRMFILT0_PAN_COORDINATOR BIT(1) /* CC2520_FRMCTRL0 */ #define FRMCTRL0_AUTOACK BIT(5) #define FRMCTRL0_AUTOCRC BIT(6) /* CC2520_FRMCTRL1 */ #define FRMCTRL1_SET_RXENMASK_ON_TX BIT(0) #define FRMCTRL1_IGNORE_TX_UNDERF BIT(1) /* Driver private information */ struct cc2520_private { struct spi_device *spi; /* SPI device structure */ struct ieee802154_hw *hw; /* IEEE-802.15.4 device */ u8 *buf; /* SPI TX/Rx data buffer */ struct mutex buffer_mutex; /* SPI buffer mutex */ bool is_tx; /* Flag for sync b/w Tx and Rx */ bool amplified; /* Flag for CC2591 */ struct gpio_desc *fifo_pin; /* FIFO GPIO pin number */ struct work_struct fifop_irqwork;/* Workqueue for FIFOP */ spinlock_t lock; /* Lock for is_tx*/ struct completion tx_complete; /* Work completion for Tx */ bool promiscuous; /* Flag for promiscuous mode */ }; /* Generic Functions */ static int cc2520_cmd_strobe(struct cc2520_private *priv, u8 cmd) { int ret; struct spi_message msg; struct spi_transfer xfer = { .len = 0, .tx_buf = priv->buf, .rx_buf = priv->buf, }; spi_message_init(&msg); spi_message_add_tail(&xfer, &msg); mutex_lock(&priv->buffer_mutex); priv->buf[xfer.len++] = cmd; dev_vdbg(&priv->spi->dev, "command strobe buf[0] = %02x\n", priv->buf[0]); ret = spi_sync(priv->spi, &msg); dev_vdbg(&priv->spi->dev, "buf[0] = %02x\n", priv->buf[0]); mutex_unlock(&priv->buffer_mutex); return ret; } static int cc2520_get_status(struct cc2520_private *priv, u8 *status) { int ret; struct spi_message msg; struct spi_transfer xfer = { .len = 0, .tx_buf = priv->buf, .rx_buf = priv->buf, }; spi_message_init(&msg); spi_message_add_tail(&xfer, &msg); mutex_lock(&priv->buffer_mutex); priv->buf[xfer.len++] = CC2520_CMD_SNOP; dev_vdbg(&priv->spi->dev, "get status command buf[0] = 
%02x\n", priv->buf[0]); ret = spi_sync(priv->spi, &msg); if (!ret) *status = priv->buf[0]; dev_vdbg(&priv->spi->dev, "buf[0] = %02x\n", priv->buf[0]); mutex_unlock(&priv->buffer_mutex); return ret; } static int cc2520_write_register(struct cc2520_private *priv, u8 reg, u8 value) { int status; struct spi_message msg; struct spi_transfer xfer = { .len = 0, .tx_buf = priv->buf, .rx_buf = priv->buf, }; spi_message_init(&msg); spi_message_add_tail(&xfer, &msg); mutex_lock(&priv->buffer_mutex); if (reg <= CC2520_FREG_MASK) { priv->buf[xfer.len++] = CC2520_CMD_REGISTER_WRITE | reg; priv->buf[xfer.len++] = value; } else { priv->buf[xfer.len++] = CC2520_CMD_MEMORY_WRITE; priv->buf[xfer.len++] = reg; priv->buf[xfer.len++] = value; } status = spi_sync(priv->spi, &msg); if (msg.status) status = msg.status; mutex_unlock(&priv->buffer_mutex); return status; } static int cc2520_write_ram(struct cc2520_private *priv, u16 reg, u8 len, u8 *data) { int status; struct spi_message msg; struct spi_transfer xfer_head = { .len = 0, .tx_buf = priv->buf, .rx_buf = priv->buf, }; struct spi_transfer xfer_buf = { .len = len, .tx_buf = data, }; mutex_lock(&priv->buffer_mutex); priv->buf[xfer_head.len++] = (CC2520_CMD_MEMORY_WRITE | ((reg >> 8) & 0xff)); priv->buf[xfer_head.len++] = reg & 0xff; spi_message_init(&msg); spi_message_add_tail(&xfer_head, &msg); spi_message_add_tail(&xfer_buf, &msg); status = spi_sync(priv->spi, &msg); dev_dbg(&priv->spi->dev, "spi status = %d\n", status); if (msg.status) status = msg.status; mutex_unlock(&priv->buffer_mutex); return status; } static int cc2520_read_register(struct cc2520_private *priv, u8 reg, u8 *data) { int status; struct spi_message msg; struct spi_transfer xfer1 = { .len = 0, .tx_buf = priv->buf, .rx_buf = priv->buf, }; struct spi_transfer xfer2 = { .len = 1, .rx_buf = data, }; spi_message_init(&msg); spi_message_add_tail(&xfer1, &msg); spi_message_add_tail(&xfer2, &msg); mutex_lock(&priv->buffer_mutex); priv->buf[xfer1.len++] = CC2520_CMD_MEMORY_READ; priv->buf[xfer1.len++] = reg; status = spi_sync(priv->spi, &msg); dev_dbg(&priv->spi->dev, "spi status = %d\n", status); if (msg.status) status = msg.status; mutex_unlock(&priv->buffer_mutex); return status; } static int cc2520_write_txfifo(struct cc2520_private *priv, u8 pkt_len, u8 *data, u8 len) { int status; /* length byte must include FCS even * if it is calculated in the hardware */ int len_byte = pkt_len; struct spi_message msg; struct spi_transfer xfer_head = { .len = 0, .tx_buf = priv->buf, .rx_buf = priv->buf, }; struct spi_transfer xfer_len = { .len = 1, .tx_buf = &len_byte, }; struct spi_transfer xfer_buf = { .len = len, .tx_buf = data, }; spi_message_init(&msg); spi_message_add_tail(&xfer_head, &msg); spi_message_add_tail(&xfer_len, &msg); spi_message_add_tail(&xfer_buf, &msg); mutex_lock(&priv->buffer_mutex); priv->buf[xfer_head.len++] = CC2520_CMD_TXBUF; dev_vdbg(&priv->spi->dev, "TX_FIFO cmd buf[0] = %02x\n", priv->buf[0]); status = spi_sync(priv->spi, &msg); dev_vdbg(&priv->spi->dev, "status = %d\n", status); if (msg.status) status = msg.status; dev_vdbg(&priv->spi->dev, "status = %d\n", status); dev_vdbg(&priv->spi->dev, "buf[0] = %02x\n", priv->buf[0]); mutex_unlock(&priv->buffer_mutex); return status; } static int cc2520_read_rxfifo(struct cc2520_private *priv, u8 *data, u8 len) { int status; struct spi_message msg; struct spi_transfer xfer_head = { .len = 0, .tx_buf = priv->buf, .rx_buf = priv->buf, }; struct spi_transfer xfer_buf = { .len = len, .rx_buf = data, }; spi_message_init(&msg); 
spi_message_add_tail(&xfer_head, &msg); spi_message_add_tail(&xfer_buf, &msg); mutex_lock(&priv->buffer_mutex); priv->buf[xfer_head.len++] = CC2520_CMD_RXBUF; dev_vdbg(&priv->spi->dev, "read rxfifo buf[0] = %02x\n", priv->buf[0]); dev_vdbg(&priv->spi->dev, "buf[1] = %02x\n", priv->buf[1]); status = spi_sync(priv->spi, &msg); dev_vdbg(&priv->spi->dev, "status = %d\n", status); if (msg.status) status = msg.status; dev_vdbg(&priv->spi->dev, "status = %d\n", status); dev_vdbg(&priv->spi->dev, "return status buf[0] = %02x\n", priv->buf[0]); dev_vdbg(&priv->spi->dev, "length buf[1] = %02x\n", priv->buf[1]); mutex_unlock(&priv->buffer_mutex); return status; } static int cc2520_start(struct ieee802154_hw *hw) { return cc2520_cmd_strobe(hw->priv, CC2520_CMD_SRXON); } static void cc2520_stop(struct ieee802154_hw *hw) { cc2520_cmd_strobe(hw->priv, CC2520_CMD_SRFOFF); } static int cc2520_tx(struct ieee802154_hw *hw, struct sk_buff *skb) { struct cc2520_private *priv = hw->priv; unsigned long flags; int rc; u8 status = 0; u8 pkt_len; /* In promiscuous mode we disable AUTOCRC so we can get the raw CRC * values on RX. This means we need to manually add the CRC on TX. */ if (priv->promiscuous) { u16 crc = crc_ccitt(0, skb->data, skb->len); put_unaligned_le16(crc, skb_put(skb, 2)); pkt_len = skb->len; } else { pkt_len = skb->len + 2; } rc = cc2520_cmd_strobe(priv, CC2520_CMD_SFLUSHTX); if (rc) goto err_tx; rc = cc2520_write_txfifo(priv, pkt_len, skb->data, skb->len); if (rc) goto err_tx; rc = cc2520_get_status(priv, &status); if (rc) goto err_tx; if (status & CC2520_STATUS_TX_UNDERFLOW) { rc = -EINVAL; dev_err(&priv->spi->dev, "cc2520 tx underflow exception\n"); goto err_tx; } spin_lock_irqsave(&priv->lock, flags); WARN_ON(priv->is_tx); priv->is_tx = 1; spin_unlock_irqrestore(&priv->lock, flags); rc = cc2520_cmd_strobe(priv, CC2520_CMD_STXONCCA); if (rc) goto err; rc = wait_for_completion_interruptible(&priv->tx_complete); if (rc < 0) goto err; cc2520_cmd_strobe(priv, CC2520_CMD_SFLUSHTX); cc2520_cmd_strobe(priv, CC2520_CMD_SRXON); return rc; err: spin_lock_irqsave(&priv->lock, flags); priv->is_tx = 0; spin_unlock_irqrestore(&priv->lock, flags); err_tx: return rc; } static int cc2520_rx(struct cc2520_private *priv) { u8 len = 0, lqi = 0, bytes = 1; struct sk_buff *skb; /* Read single length byte from the radio. */ cc2520_read_rxfifo(priv, &len, bytes); if (!ieee802154_is_valid_psdu_len(len)) { /* Corrupted frame received, clear frame buffer by * reading entire buffer. */ dev_dbg(&priv->spi->dev, "corrupted frame received\n"); len = IEEE802154_MTU; } skb = dev_alloc_skb(len); if (!skb) return -ENOMEM; if (cc2520_read_rxfifo(priv, skb_put(skb, len), len)) { dev_dbg(&priv->spi->dev, "frame reception failed\n"); kfree_skb(skb); return -EINVAL; } /* In promiscuous mode, we configure the radio to include the * CRC (AUTOCRC==0) and we pass on the packet unconditionally. If not * in promiscuous mode, we check the CRC here, but leave the * RSSI/LQI/CRC_OK bytes as they will get removed in the mac layer. */ if (!priv->promiscuous) { bool crc_ok; /* Check if the CRC is valid. With AUTOCRC set, the most * significant bit of the last byte returned from the CC2520 * is CRC_OK flag. See section 20.3.4 of the datasheet. */ crc_ok = skb->data[len - 1] & BIT(7); /* If we failed CRC drop the packet in the driver layer. 
*/ if (!crc_ok) { dev_dbg(&priv->spi->dev, "CRC check failed\n"); kfree_skb(skb); return -EINVAL; } /* To calculate LQI, the lower 7 bits of the last byte (the * correlation value provided by the radio) must be scaled to * the range 0-255. According to section 20.6, the correlation * value ranges from 50-110. Ideally this would be calibrated * per hardware design, but we use roughly the datasheet values * to get close enough while avoiding floating point. */ lqi = skb->data[len - 1] & 0x7f; if (lqi < 50) lqi = 50; else if (lqi > 113) lqi = 113; lqi = (lqi - 50) * 4; } ieee802154_rx_irqsafe(priv->hw, skb, lqi); dev_vdbg(&priv->spi->dev, "RXFIFO: %x %x\n", len, lqi); return 0; } static int cc2520_ed(struct ieee802154_hw *hw, u8 *level) { struct cc2520_private *priv = hw->priv; u8 status = 0xff; u8 rssi; int ret; ret = cc2520_read_register(priv, CC2520_RSSISTAT, &status); if (ret) return ret; if (status != RSSI_VALID) return -EINVAL; ret = cc2520_read_register(priv, CC2520_RSSI, &rssi); if (ret) return ret; /* level = RSSI(rssi) - OFFSET [dBm] : offset is 76dBm */ *level = rssi - RSSI_OFFSET; return 0; } static int cc2520_set_channel(struct ieee802154_hw *hw, u8 page, u8 channel) { struct cc2520_private *priv = hw->priv; int ret; dev_dbg(&priv->spi->dev, "trying to set channel\n"); WARN_ON(page != 0); WARN_ON(channel < CC2520_MINCHANNEL); WARN_ON(channel > CC2520_MAXCHANNEL); ret = cc2520_write_register(priv, CC2520_FREQCTRL, 11 + 5 * (channel - 11)); return ret; } static int cc2520_filter(struct ieee802154_hw *hw, struct ieee802154_hw_addr_filt *filt, unsigned long changed) { struct cc2520_private *priv = hw->priv; int ret = 0; if (changed & IEEE802154_AFILT_PANID_CHANGED) { u16 panid = le16_to_cpu(filt->pan_id); dev_vdbg(&priv->spi->dev, "%s called for pan id\n", __func__); ret = cc2520_write_ram(priv, CC2520RAM_PANID, sizeof(panid), (u8 *)&panid); } if (changed & IEEE802154_AFILT_IEEEADDR_CHANGED) { dev_vdbg(&priv->spi->dev, "%s called for IEEE addr\n", __func__); ret = cc2520_write_ram(priv, CC2520RAM_IEEEADDR, sizeof(filt->ieee_addr), (u8 *)&filt->ieee_addr); } if (changed & IEEE802154_AFILT_SADDR_CHANGED) { u16 addr = le16_to_cpu(filt->short_addr); dev_vdbg(&priv->spi->dev, "%s called for saddr\n", __func__); ret = cc2520_write_ram(priv, CC2520RAM_SHORTADDR, sizeof(addr), (u8 *)&addr); } if (changed & IEEE802154_AFILT_PANC_CHANGED) { u8 frmfilt0; dev_vdbg(&priv->spi->dev, "%s called for panc change\n", __func__); cc2520_read_register(priv, CC2520_FRMFILT0, &frmfilt0); if (filt->pan_coord) frmfilt0 |= FRMFILT0_PAN_COORDINATOR; else frmfilt0 &= ~FRMFILT0_PAN_COORDINATOR; ret = cc2520_write_register(priv, CC2520_FRMFILT0, frmfilt0); } return ret; } static inline int cc2520_set_tx_power(struct cc2520_private *priv, s32 mbm) { u8 power; switch (mbm) { case 500: power = 0xF7; break; case 300: power = 0xF2; break; case 200: power = 0xAB; break; case 100: power = 0x13; break; case 0: power = 0x32; break; case -200: power = 0x81; break; case -400: power = 0x88; break; case -700: power = 0x2C; break; case -1800: power = 0x03; break; default: return -EINVAL; } return cc2520_write_register(priv, CC2520_TXPOWER, power); } static inline int cc2520_cc2591_set_tx_power(struct cc2520_private *priv, s32 mbm) { u8 power; switch (mbm) { case 1700: power = 0xF9; break; case 1600: power = 0xF0; break; case 1400: power = 0xA0; break; case 1100: power = 0x2C; break; case -100: power = 0x03; break; case -800: power = 0x01; break; default: return -EINVAL; } return cc2520_write_register(priv, CC2520_TXPOWER, 
power); } #define CC2520_MAX_TX_POWERS 0x8 static const s32 cc2520_powers[CC2520_MAX_TX_POWERS + 1] = { 500, 300, 200, 100, 0, -200, -400, -700, -1800, }; #define CC2520_CC2591_MAX_TX_POWERS 0x5 static const s32 cc2520_cc2591_powers[CC2520_CC2591_MAX_TX_POWERS + 1] = { 1700, 1600, 1400, 1100, -100, -800, }; static int cc2520_set_txpower(struct ieee802154_hw *hw, s32 mbm) { struct cc2520_private *priv = hw->priv; if (!priv->amplified) return cc2520_set_tx_power(priv, mbm); return cc2520_cc2591_set_tx_power(priv, mbm); } static int cc2520_set_promiscuous_mode(struct ieee802154_hw *hw, bool on) { struct cc2520_private *priv = hw->priv; u8 frmfilt0; dev_dbg(&priv->spi->dev, "%s : mode %d\n", __func__, on); priv->promiscuous = on; cc2520_read_register(priv, CC2520_FRMFILT0, &frmfilt0); if (on) { /* Disable automatic ACK, automatic CRC, and frame filtering. */ cc2520_write_register(priv, CC2520_FRMCTRL0, 0); frmfilt0 &= ~FRMFILT0_FRAME_FILTER_EN; } else { cc2520_write_register(priv, CC2520_FRMCTRL0, FRMCTRL0_AUTOACK | FRMCTRL0_AUTOCRC); frmfilt0 |= FRMFILT0_FRAME_FILTER_EN; } return cc2520_write_register(priv, CC2520_FRMFILT0, frmfilt0); } static const struct ieee802154_ops cc2520_ops = { .owner = THIS_MODULE, .start = cc2520_start, .stop = cc2520_stop, .xmit_sync = cc2520_tx, .ed = cc2520_ed, .set_channel = cc2520_set_channel, .set_hw_addr_filt = cc2520_filter, .set_txpower = cc2520_set_txpower, .set_promiscuous_mode = cc2520_set_promiscuous_mode, }; static int cc2520_register(struct cc2520_private *priv) { int ret = -ENOMEM; priv->hw = ieee802154_alloc_hw(sizeof(*priv), &cc2520_ops); if (!priv->hw) goto err_ret; priv->hw->priv = priv; priv->hw->parent = &priv->spi->dev; priv->hw->extra_tx_headroom = 0; ieee802154_random_extended_addr(&priv->hw->phy->perm_extended_addr); /* We do support only 2.4 Ghz */ priv->hw->phy->supported.channels[0] = 0x7FFF800; priv->hw->flags = IEEE802154_HW_TX_OMIT_CKSUM | IEEE802154_HW_AFILT | IEEE802154_HW_PROMISCUOUS; priv->hw->phy->flags = WPAN_PHY_FLAG_TXPOWER; if (!priv->amplified) { priv->hw->phy->supported.tx_powers = cc2520_powers; priv->hw->phy->supported.tx_powers_size = ARRAY_SIZE(cc2520_powers); priv->hw->phy->transmit_power = priv->hw->phy->supported.tx_powers[4]; } else { priv->hw->phy->supported.tx_powers = cc2520_cc2591_powers; priv->hw->phy->supported.tx_powers_size = ARRAY_SIZE(cc2520_cc2591_powers); priv->hw->phy->transmit_power = priv->hw->phy->supported.tx_powers[0]; } priv->hw->phy->current_channel = 11; dev_vdbg(&priv->spi->dev, "registered cc2520\n"); ret = ieee802154_register_hw(priv->hw); if (ret) goto err_free_device; return 0; err_free_device: ieee802154_free_hw(priv->hw); err_ret: return ret; } static void cc2520_fifop_irqwork(struct work_struct *work) { struct cc2520_private *priv = container_of(work, struct cc2520_private, fifop_irqwork); dev_dbg(&priv->spi->dev, "fifop interrupt received\n"); if (gpiod_get_value(priv->fifo_pin)) cc2520_rx(priv); else dev_dbg(&priv->spi->dev, "rxfifo overflow\n"); cc2520_cmd_strobe(priv, CC2520_CMD_SFLUSHRX); cc2520_cmd_strobe(priv, CC2520_CMD_SFLUSHRX); } static irqreturn_t cc2520_fifop_isr(int irq, void *data) { struct cc2520_private *priv = data; schedule_work(&priv->fifop_irqwork); return IRQ_HANDLED; } static irqreturn_t cc2520_sfd_isr(int irq, void *data) { struct cc2520_private *priv = data; unsigned long flags; spin_lock_irqsave(&priv->lock, flags); if (priv->is_tx) { priv->is_tx = 0; spin_unlock_irqrestore(&priv->lock, flags); dev_dbg(&priv->spi->dev, "SFD for TX\n"); 
complete(&priv->tx_complete); } else { spin_unlock_irqrestore(&priv->lock, flags); dev_dbg(&priv->spi->dev, "SFD for RX\n"); } return IRQ_HANDLED; } static int cc2520_hw_init(struct cc2520_private *priv) { u8 status = 0, state = 0xff; int ret; int timeout = 100; ret = cc2520_read_register(priv, CC2520_FSMSTAT1, &state); if (ret) goto err_ret; if (state != STATE_IDLE) return -EINVAL; do { ret = cc2520_get_status(priv, &status); if (ret) goto err_ret; if (timeout-- <= 0) { dev_err(&priv->spi->dev, "oscillator start failed!\n"); return -ETIMEDOUT; } udelay(1); } while (!(status & CC2520_STATUS_XOSC32M_STABLE)); dev_vdbg(&priv->spi->dev, "oscillator brought up\n"); /* If the CC2520 is connected to a CC2591 amplifier, we must both * configure GPIOs on the CC2520 to correctly configure the CC2591 * and change a couple settings of the CC2520 to work with the * amplifier. See section 8 page 17 of TI application note AN065. * http://www.ti.com/lit/an/swra229a/swra229a.pdf */ if (priv->amplified) { ret = cc2520_write_register(priv, CC2520_AGCCTRL1, 0x16); if (ret) goto err_ret; ret = cc2520_write_register(priv, CC2520_GPIOCTRL0, 0x46); if (ret) goto err_ret; ret = cc2520_write_register(priv, CC2520_GPIOCTRL5, 0x47); if (ret) goto err_ret; ret = cc2520_write_register(priv, CC2520_GPIOPOLARITY, 0x1e); if (ret) goto err_ret; ret = cc2520_write_register(priv, CC2520_TXCTRL, 0xc1); if (ret) goto err_ret; } else { ret = cc2520_write_register(priv, CC2520_AGCCTRL1, 0x11); if (ret) goto err_ret; } /* Registers default value: section 28.1 in Datasheet */ /* Set the CCA threshold to -50 dBm. This seems to have been copied * from the TinyOS CC2520 driver and is much higher than the -84 dBm * threshold suggested in the datasheet. */ ret = cc2520_write_register(priv, CC2520_CCACTRL0, 0x1A); if (ret) goto err_ret; ret = cc2520_write_register(priv, CC2520_MDMCTRL0, 0x85); if (ret) goto err_ret; ret = cc2520_write_register(priv, CC2520_MDMCTRL1, 0x14); if (ret) goto err_ret; ret = cc2520_write_register(priv, CC2520_RXCTRL, 0x3f); if (ret) goto err_ret; ret = cc2520_write_register(priv, CC2520_FSCTRL, 0x5a); if (ret) goto err_ret; ret = cc2520_write_register(priv, CC2520_FSCAL1, 0x2b); if (ret) goto err_ret; ret = cc2520_write_register(priv, CC2520_ADCTEST0, 0x10); if (ret) goto err_ret; ret = cc2520_write_register(priv, CC2520_ADCTEST1, 0x0e); if (ret) goto err_ret; ret = cc2520_write_register(priv, CC2520_ADCTEST2, 0x03); if (ret) goto err_ret; /* Configure registers correctly for this driver. 
*/ ret = cc2520_write_register(priv, CC2520_FRMCTRL1, FRMCTRL1_SET_RXENMASK_ON_TX | FRMCTRL1_IGNORE_TX_UNDERF); if (ret) goto err_ret; ret = cc2520_write_register(priv, CC2520_FIFOPCTRL, 127); if (ret) goto err_ret; return 0; err_ret: return ret; } static int cc2520_probe(struct spi_device *spi) { struct cc2520_private *priv; struct gpio_desc *fifop; struct gpio_desc *cca; struct gpio_desc *sfd; struct gpio_desc *reset; struct gpio_desc *vreg; int ret; priv = devm_kzalloc(&spi->dev, sizeof(*priv), GFP_KERNEL); if (!priv) return -ENOMEM; spi_set_drvdata(spi, priv); /* CC2591 front end for CC2520 */ /* Assumption that CC2591 is not connected */ priv->amplified = false; if (device_property_read_bool(&spi->dev, "amplified")) priv->amplified = true; priv->spi = spi; priv->buf = devm_kzalloc(&spi->dev, SPI_COMMAND_BUFFER, GFP_KERNEL); if (!priv->buf) return -ENOMEM; mutex_init(&priv->buffer_mutex); INIT_WORK(&priv->fifop_irqwork, cc2520_fifop_irqwork); spin_lock_init(&priv->lock); init_completion(&priv->tx_complete); /* Request all the gpio's */ priv->fifo_pin = devm_gpiod_get(&spi->dev, "fifo", GPIOD_IN); if (IS_ERR(priv->fifo_pin)) { dev_err(&spi->dev, "fifo gpio is not valid\n"); ret = PTR_ERR(priv->fifo_pin); goto err_hw_init; } cca = devm_gpiod_get(&spi->dev, "cca", GPIOD_IN); if (IS_ERR(cca)) { dev_err(&spi->dev, "cca gpio is not valid\n"); ret = PTR_ERR(cca); goto err_hw_init; } fifop = devm_gpiod_get(&spi->dev, "fifop", GPIOD_IN); if (IS_ERR(fifop)) { dev_err(&spi->dev, "fifop gpio is not valid\n"); ret = PTR_ERR(fifop); goto err_hw_init; } sfd = devm_gpiod_get(&spi->dev, "sfd", GPIOD_IN); if (IS_ERR(sfd)) { dev_err(&spi->dev, "sfd gpio is not valid\n"); ret = PTR_ERR(sfd); goto err_hw_init; } reset = devm_gpiod_get(&spi->dev, "reset", GPIOD_OUT_LOW); if (IS_ERR(reset)) { dev_err(&spi->dev, "reset gpio is not valid\n"); ret = PTR_ERR(reset); goto err_hw_init; } vreg = devm_gpiod_get(&spi->dev, "vreg", GPIOD_OUT_LOW); if (IS_ERR(vreg)) { dev_err(&spi->dev, "vreg gpio is not valid\n"); ret = PTR_ERR(vreg); goto err_hw_init; } gpiod_set_value(vreg, HIGH); usleep_range(100, 150); gpiod_set_value(reset, HIGH); usleep_range(200, 250); ret = cc2520_hw_init(priv); if (ret) goto err_hw_init; /* Set up fifop interrupt */ ret = devm_request_irq(&spi->dev, gpiod_to_irq(fifop), cc2520_fifop_isr, IRQF_TRIGGER_RISING, dev_name(&spi->dev), priv); if (ret) { dev_err(&spi->dev, "could not get fifop irq\n"); goto err_hw_init; } /* Set up sfd interrupt */ ret = devm_request_irq(&spi->dev, gpiod_to_irq(sfd), cc2520_sfd_isr, IRQF_TRIGGER_FALLING, dev_name(&spi->dev), priv); if (ret) { dev_err(&spi->dev, "could not get sfd irq\n"); goto err_hw_init; } ret = cc2520_register(priv); if (ret) goto err_hw_init; return 0; err_hw_init: mutex_destroy(&priv->buffer_mutex); flush_work(&priv->fifop_irqwork); return ret; } static void cc2520_remove(struct spi_device *spi) { struct cc2520_private *priv = spi_get_drvdata(spi); mutex_destroy(&priv->buffer_mutex); flush_work(&priv->fifop_irqwork); ieee802154_unregister_hw(priv->hw); ieee802154_free_hw(priv->hw); } static const struct spi_device_id cc2520_ids[] = { {"cc2520", }, {}, }; MODULE_DEVICE_TABLE(spi, cc2520_ids); static const struct of_device_id cc2520_of_ids[] = { {.compatible = "ti,cc2520", }, {}, }; MODULE_DEVICE_TABLE(of, cc2520_of_ids); /* SPI driver structure */ static struct spi_driver cc2520_driver = { .driver = { .name = "cc2520", .of_match_table = cc2520_of_ids, }, .id_table = cc2520_ids, .probe = cc2520_probe, .remove = cc2520_remove, }; 
module_spi_driver(cc2520_driver); MODULE_AUTHOR("Varka Bhadram <[email protected]>"); MODULE_DESCRIPTION("CC2520 Transceiver Driver"); MODULE_LICENSE("GPL v2");
linux-master
drivers/net/ieee802154/cc2520.c
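/*
 * Illustrative sketch only (not taken from the kernel tree): the LQI scaling
 * performed at the end of cc2520_rx(), extracted as a standalone helper so
 * the arithmetic is easy to check. The radio reports a correlation value of
 * roughly 50..110 in the low 7 bits of the trailing status byte; the driver
 * maps that to the 0..255 LQI range using integer math only.
 */
#include <stdio.h>
#include <stdint.h>

static uint8_t cc2520_scale_lqi(uint8_t status_byte)
{
	uint8_t corr = status_byte & 0x7f;	/* bit 7 is the CRC_OK flag */

	if (corr < 50)
		corr = 50;
	else if (corr > 113)
		corr = 113;

	return (corr - 50) * 4;			/* 0..252, close enough to 0..255 */
}

int main(void)
{
	/* weakest, mid-range and strongest correlation values */
	printf("%u %u %u\n", cc2520_scale_lqi(0x32),	/* 50  -> 0   */
			     cc2520_scale_lqi(0x50),	/* 80  -> 120 */
			     cc2520_scale_lqi(0xf1));	/* 113 -> 252 */
	return 0;
}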
// SPDX-License-Identifier: GPL-2.0-only /* * Loopback IEEE 802.15.4 interface * * Copyright 2007-2012 Siemens AG * * Written by: * Sergey Lapin <[email protected]> * Dmitry Eremin-Solenikov <[email protected]> * Alexander Smirnov <[email protected]> */ #include <linux/module.h> #include <linux/timer.h> #include <linux/platform_device.h> #include <linux/netdevice.h> #include <linux/device.h> #include <linux/spinlock.h> #include <net/mac802154.h> #include <net/cfg802154.h> static int numlbs = 2; static LIST_HEAD(fakelb_phys); static DEFINE_MUTEX(fakelb_phys_lock); static LIST_HEAD(fakelb_ifup_phys); static DEFINE_RWLOCK(fakelb_ifup_phys_lock); struct fakelb_phy { struct ieee802154_hw *hw; u8 page; u8 channel; bool suspended; struct list_head list; struct list_head list_ifup; }; static int fakelb_hw_ed(struct ieee802154_hw *hw, u8 *level) { WARN_ON(!level); *level = 0xbe; return 0; } static int fakelb_hw_channel(struct ieee802154_hw *hw, u8 page, u8 channel) { struct fakelb_phy *phy = hw->priv; write_lock_bh(&fakelb_ifup_phys_lock); phy->page = page; phy->channel = channel; write_unlock_bh(&fakelb_ifup_phys_lock); return 0; } static int fakelb_hw_xmit(struct ieee802154_hw *hw, struct sk_buff *skb) { struct fakelb_phy *current_phy = hw->priv, *phy; read_lock_bh(&fakelb_ifup_phys_lock); WARN_ON(current_phy->suspended); list_for_each_entry(phy, &fakelb_ifup_phys, list_ifup) { if (current_phy == phy) continue; if (current_phy->page == phy->page && current_phy->channel == phy->channel) { struct sk_buff *newskb = pskb_copy(skb, GFP_ATOMIC); if (newskb) ieee802154_rx_irqsafe(phy->hw, newskb, 0xcc); } } read_unlock_bh(&fakelb_ifup_phys_lock); ieee802154_xmit_complete(hw, skb, false); return 0; } static int fakelb_hw_start(struct ieee802154_hw *hw) { struct fakelb_phy *phy = hw->priv; write_lock_bh(&fakelb_ifup_phys_lock); phy->suspended = false; list_add(&phy->list_ifup, &fakelb_ifup_phys); write_unlock_bh(&fakelb_ifup_phys_lock); return 0; } static void fakelb_hw_stop(struct ieee802154_hw *hw) { struct fakelb_phy *phy = hw->priv; write_lock_bh(&fakelb_ifup_phys_lock); phy->suspended = true; list_del(&phy->list_ifup); write_unlock_bh(&fakelb_ifup_phys_lock); } static int fakelb_set_promiscuous_mode(struct ieee802154_hw *hw, const bool on) { return 0; } static const struct ieee802154_ops fakelb_ops = { .owner = THIS_MODULE, .xmit_async = fakelb_hw_xmit, .ed = fakelb_hw_ed, .set_channel = fakelb_hw_channel, .start = fakelb_hw_start, .stop = fakelb_hw_stop, .set_promiscuous_mode = fakelb_set_promiscuous_mode, }; /* Number of dummy devices to be set up by this module. 
*/ module_param(numlbs, int, 0); MODULE_PARM_DESC(numlbs, " number of pseudo devices"); static int fakelb_add_one(struct device *dev) { struct ieee802154_hw *hw; struct fakelb_phy *phy; int err; hw = ieee802154_alloc_hw(sizeof(*phy), &fakelb_ops); if (!hw) return -ENOMEM; phy = hw->priv; phy->hw = hw; /* 868 MHz BPSK 802.15.4-2003 */ hw->phy->supported.channels[0] |= 1; /* 915 MHz BPSK 802.15.4-2003 */ hw->phy->supported.channels[0] |= 0x7fe; /* 2.4 GHz O-QPSK 802.15.4-2003 */ hw->phy->supported.channels[0] |= 0x7FFF800; /* 868 MHz ASK 802.15.4-2006 */ hw->phy->supported.channels[1] |= 1; /* 915 MHz ASK 802.15.4-2006 */ hw->phy->supported.channels[1] |= 0x7fe; /* 868 MHz O-QPSK 802.15.4-2006 */ hw->phy->supported.channels[2] |= 1; /* 915 MHz O-QPSK 802.15.4-2006 */ hw->phy->supported.channels[2] |= 0x7fe; /* 2.4 GHz CSS 802.15.4a-2007 */ hw->phy->supported.channels[3] |= 0x3fff; /* UWB Sub-gigahertz 802.15.4a-2007 */ hw->phy->supported.channels[4] |= 1; /* UWB Low band 802.15.4a-2007 */ hw->phy->supported.channels[4] |= 0x1e; /* UWB High band 802.15.4a-2007 */ hw->phy->supported.channels[4] |= 0xffe0; /* 750 MHz O-QPSK 802.15.4c-2009 */ hw->phy->supported.channels[5] |= 0xf; /* 750 MHz MPSK 802.15.4c-2009 */ hw->phy->supported.channels[5] |= 0xf0; /* 950 MHz BPSK 802.15.4d-2009 */ hw->phy->supported.channels[6] |= 0x3ff; /* 950 MHz GFSK 802.15.4d-2009 */ hw->phy->supported.channels[6] |= 0x3ffc00; ieee802154_random_extended_addr(&hw->phy->perm_extended_addr); /* fake phy channel 13 as default */ hw->phy->current_channel = 13; phy->channel = hw->phy->current_channel; hw->flags = IEEE802154_HW_PROMISCUOUS; hw->parent = dev; err = ieee802154_register_hw(hw); if (err) goto err_reg; mutex_lock(&fakelb_phys_lock); list_add_tail(&phy->list, &fakelb_phys); mutex_unlock(&fakelb_phys_lock); return 0; err_reg: ieee802154_free_hw(phy->hw); return err; } static void fakelb_del(struct fakelb_phy *phy) { list_del(&phy->list); ieee802154_unregister_hw(phy->hw); ieee802154_free_hw(phy->hw); } static int fakelb_probe(struct platform_device *pdev) { struct fakelb_phy *phy, *tmp; int err, i; for (i = 0; i < numlbs; i++) { err = fakelb_add_one(&pdev->dev); if (err < 0) goto err_slave; } dev_info(&pdev->dev, "added %i fake ieee802154 hardware devices\n", numlbs); return 0; err_slave: mutex_lock(&fakelb_phys_lock); list_for_each_entry_safe(phy, tmp, &fakelb_phys, list) fakelb_del(phy); mutex_unlock(&fakelb_phys_lock); return err; } static int fakelb_remove(struct platform_device *pdev) { struct fakelb_phy *phy, *tmp; mutex_lock(&fakelb_phys_lock); list_for_each_entry_safe(phy, tmp, &fakelb_phys, list) fakelb_del(phy); mutex_unlock(&fakelb_phys_lock); return 0; } static struct platform_device *ieee802154fake_dev; static struct platform_driver ieee802154fake_driver = { .probe = fakelb_probe, .remove = fakelb_remove, .driver = { .name = "ieee802154fakelb", }, }; static __init int fakelb_init_module(void) { ieee802154fake_dev = platform_device_register_simple( "ieee802154fakelb", -1, NULL, 0); pr_warn("fakelb driver is marked as deprecated, please use mac802154_hwsim!\n"); return platform_driver_register(&ieee802154fake_driver); } static __exit void fake_remove_module(void) { platform_driver_unregister(&ieee802154fake_driver); platform_device_unregister(ieee802154fake_dev); } module_init(fakelb_init_module); module_exit(fake_remove_module); MODULE_LICENSE("GPL");
linux-master
drivers/net/ieee802154/fakelb.c
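/*
 * Illustrative sketch only: how the channel bitmaps assigned by
 * fakelb_add_one() to hw->phy->supported.channels[] decompose. Each page is
 * a 32-bit mask with bit N set when channel N is supported, so the 2.4 GHz
 * O-QPSK constant 0x7FFF800 is simply channels 11..26 of page 0.
 */
#include <stdio.h>
#include <stdint.h>

static uint32_t channel_mask(unsigned int first, unsigned int last)
{
	uint32_t mask = 0;
	unsigned int ch;

	for (ch = first; ch <= last; ch++)
		mask |= 1u << ch;

	return mask;
}

int main(void)
{
	printf("2.4 GHz O-QPSK: 0x%07X\n", channel_mask(11, 26));	/* 0x7FFF800 */
	printf("915 MHz BPSK:   0x%07X\n", channel_mask(1, 10));	/* 0x00007FE */
	printf("868 MHz BPSK:   0x%07X\n", channel_mask(0, 0));		/* 0x0000001 */
	return 0;
}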
// SPDX-License-Identifier: GPL-2.0 /* * Copyright (C) 2022 Schneider Electric * * Clément Léger <[email protected]> */ #include <linux/clk.h> #include <linux/device.h> #include <linux/mdio.h> #include <linux/of.h> #include <linux/of_platform.h> #include <linux/pcs-rzn1-miic.h> #include <linux/phylink.h> #include <linux/platform_device.h> #include <linux/pm_runtime.h> #include <dt-bindings/net/pcs-rzn1-miic.h> #define MIIC_PRCMD 0x0 #define MIIC_ESID_CODE 0x4 #define MIIC_MODCTRL 0x20 #define MIIC_MODCTRL_SW_MODE GENMASK(4, 0) #define MIIC_CONVCTRL(port) (0x100 + (port) * 4) #define MIIC_CONVCTRL_CONV_SPEED GENMASK(1, 0) #define CONV_MODE_10MBPS 0 #define CONV_MODE_100MBPS 1 #define CONV_MODE_1000MBPS 2 #define MIIC_CONVCTRL_CONV_MODE GENMASK(3, 2) #define CONV_MODE_MII 0 #define CONV_MODE_RMII 1 #define CONV_MODE_RGMII 2 #define MIIC_CONVCTRL_FULLD BIT(8) #define MIIC_CONVCTRL_RGMII_LINK BIT(12) #define MIIC_CONVCTRL_RGMII_DUPLEX BIT(13) #define MIIC_CONVCTRL_RGMII_SPEED GENMASK(15, 14) #define MIIC_CONVRST 0x114 #define MIIC_CONVRST_PHYIF_RST(port) BIT(port) #define MIIC_CONVRST_PHYIF_RST_MASK GENMASK(4, 0) #define MIIC_SWCTRL 0x304 #define MIIC_SWDUPC 0x308 #define MIIC_MAX_NR_PORTS 5 #define MIIC_MODCTRL_CONF_CONV_NUM 6 #define MIIC_MODCTRL_CONF_NONE -1 /** * struct modctrl_match - Matching table entry for convctrl configuration * See section 8.2.1 of manual. * @mode_cfg: Configuration value for convctrl * @conv: Configuration of ethernet port muxes. First index is SWITCH_PORTIN, * then index 1 - 5 are CONV1 - CONV5. */ struct modctrl_match { u32 mode_cfg; u8 conv[MIIC_MODCTRL_CONF_CONV_NUM]; }; static struct modctrl_match modctrl_match_table[] = { {0x0, {MIIC_RTOS_PORT, MIIC_GMAC1_PORT, MIIC_SWITCH_PORTD, MIIC_SWITCH_PORTC, MIIC_SERCOS_PORTB, MIIC_SERCOS_PORTA}}, {0x1, {MIIC_RTOS_PORT, MIIC_GMAC1_PORT, MIIC_SWITCH_PORTD, MIIC_SWITCH_PORTC, MIIC_ETHERCAT_PORTB, MIIC_ETHERCAT_PORTA}}, {0x2, {MIIC_RTOS_PORT, MIIC_GMAC1_PORT, MIIC_SWITCH_PORTD, MIIC_ETHERCAT_PORTC, MIIC_ETHERCAT_PORTB, MIIC_ETHERCAT_PORTA}}, {0x3, {MIIC_RTOS_PORT, MIIC_GMAC1_PORT, MIIC_SWITCH_PORTD, MIIC_SWITCH_PORTC, MIIC_SWITCH_PORTB, MIIC_SWITCH_PORTA}}, {0x8, {MIIC_RTOS_PORT, MIIC_GMAC1_PORT, MIIC_SWITCH_PORTD, MIIC_SWITCH_PORTC, MIIC_SERCOS_PORTB, MIIC_SERCOS_PORTA}}, {0x9, {MIIC_RTOS_PORT, MIIC_GMAC1_PORT, MIIC_SWITCH_PORTD, MIIC_SWITCH_PORTC, MIIC_ETHERCAT_PORTB, MIIC_ETHERCAT_PORTA}}, {0xA, {MIIC_RTOS_PORT, MIIC_GMAC1_PORT, MIIC_SWITCH_PORTD, MIIC_ETHERCAT_PORTC, MIIC_ETHERCAT_PORTB, MIIC_ETHERCAT_PORTA}}, {0xB, {MIIC_RTOS_PORT, MIIC_GMAC1_PORT, MIIC_SWITCH_PORTD, MIIC_SWITCH_PORTC, MIIC_SWITCH_PORTB, MIIC_SWITCH_PORTA}}, {0x10, {MIIC_GMAC2_PORT, MIIC_GMAC1_PORT, MIIC_SWITCH_PORTD, MIIC_SWITCH_PORTC, MIIC_SERCOS_PORTB, MIIC_SERCOS_PORTA}}, {0x11, {MIIC_GMAC2_PORT, MIIC_GMAC1_PORT, MIIC_SWITCH_PORTD, MIIC_SWITCH_PORTC, MIIC_ETHERCAT_PORTB, MIIC_ETHERCAT_PORTA}}, {0x12, {MIIC_GMAC2_PORT, MIIC_GMAC1_PORT, MIIC_SWITCH_PORTD, MIIC_ETHERCAT_PORTC, MIIC_ETHERCAT_PORTB, MIIC_ETHERCAT_PORTA}}, {0x13, {MIIC_GMAC2_PORT, MIIC_GMAC1_PORT, MIIC_SWITCH_PORTD, MIIC_SWITCH_PORTC, MIIC_SWITCH_PORTB, MIIC_SWITCH_PORTA}} }; static const char * const conf_to_string[] = { [MIIC_GMAC1_PORT] = "GMAC1_PORT", [MIIC_GMAC2_PORT] = "GMAC2_PORT", [MIIC_RTOS_PORT] = "RTOS_PORT", [MIIC_SERCOS_PORTA] = "SERCOS_PORTA", [MIIC_SERCOS_PORTB] = "SERCOS_PORTB", [MIIC_ETHERCAT_PORTA] = "ETHERCAT_PORTA", [MIIC_ETHERCAT_PORTB] = "ETHERCAT_PORTB", [MIIC_ETHERCAT_PORTC] = "ETHERCAT_PORTC", [MIIC_SWITCH_PORTA] = "SWITCH_PORTA", [MIIC_SWITCH_PORTB] = 
"SWITCH_PORTB", [MIIC_SWITCH_PORTC] = "SWITCH_PORTC", [MIIC_SWITCH_PORTD] = "SWITCH_PORTD", [MIIC_HSR_PORTA] = "HSR_PORTA", [MIIC_HSR_PORTB] = "HSR_PORTB", }; static const char *index_to_string[MIIC_MODCTRL_CONF_CONV_NUM] = { "SWITCH_PORTIN", "CONV1", "CONV2", "CONV3", "CONV4", "CONV5", }; /** * struct miic - MII converter structure * @base: base address of the MII converter * @dev: Device associated to the MII converter * @lock: Lock used for read-modify-write access */ struct miic { void __iomem *base; struct device *dev; spinlock_t lock; }; /** * struct miic_port - Per port MII converter struct * @miic: backiling to MII converter structure * @pcs: PCS structure associated to the port * @port: port number * @interface: interface mode of the port */ struct miic_port { struct miic *miic; struct phylink_pcs pcs; int port; phy_interface_t interface; }; static struct miic_port *phylink_pcs_to_miic_port(struct phylink_pcs *pcs) { return container_of(pcs, struct miic_port, pcs); } static void miic_reg_writel(struct miic *miic, int offset, u32 value) { writel(value, miic->base + offset); } static u32 miic_reg_readl(struct miic *miic, int offset) { return readl(miic->base + offset); } static void miic_reg_rmw(struct miic *miic, int offset, u32 mask, u32 val) { u32 reg; spin_lock(&miic->lock); reg = miic_reg_readl(miic, offset); reg &= ~mask; reg |= val; miic_reg_writel(miic, offset, reg); spin_unlock(&miic->lock); } static void miic_converter_enable(struct miic *miic, int port, int enable) { u32 val = 0; if (enable) val = MIIC_CONVRST_PHYIF_RST(port); miic_reg_rmw(miic, MIIC_CONVRST, MIIC_CONVRST_PHYIF_RST(port), val); } static int miic_config(struct phylink_pcs *pcs, unsigned int mode, phy_interface_t interface, const unsigned long *advertising, bool permit) { struct miic_port *miic_port = phylink_pcs_to_miic_port(pcs); struct miic *miic = miic_port->miic; u32 speed, conv_mode, val, mask; int port = miic_port->port; switch (interface) { case PHY_INTERFACE_MODE_RMII: conv_mode = CONV_MODE_RMII; speed = CONV_MODE_100MBPS; break; case PHY_INTERFACE_MODE_RGMII: case PHY_INTERFACE_MODE_RGMII_ID: case PHY_INTERFACE_MODE_RGMII_TXID: case PHY_INTERFACE_MODE_RGMII_RXID: conv_mode = CONV_MODE_RGMII; speed = CONV_MODE_1000MBPS; break; case PHY_INTERFACE_MODE_MII: conv_mode = CONV_MODE_MII; /* When in MII mode, speed should be set to 0 (which is actually * CONV_MODE_10MBPS) */ speed = CONV_MODE_10MBPS; break; default: return -EOPNOTSUPP; } val = FIELD_PREP(MIIC_CONVCTRL_CONV_MODE, conv_mode); mask = MIIC_CONVCTRL_CONV_MODE; /* Update speed only if we are going to change the interface because * the link might already be up and it would break it if the speed is * changed. 
*/ if (interface != miic_port->interface) { val |= FIELD_PREP(MIIC_CONVCTRL_CONV_SPEED, speed); mask |= MIIC_CONVCTRL_CONV_SPEED; miic_port->interface = interface; } miic_reg_rmw(miic, MIIC_CONVCTRL(port), mask, val); miic_converter_enable(miic, miic_port->port, 1); return 0; } static void miic_link_up(struct phylink_pcs *pcs, unsigned int mode, phy_interface_t interface, int speed, int duplex) { struct miic_port *miic_port = phylink_pcs_to_miic_port(pcs); struct miic *miic = miic_port->miic; u32 conv_speed = 0, val = 0; int port = miic_port->port; if (duplex == DUPLEX_FULL) val |= MIIC_CONVCTRL_FULLD; /* No speed in MII through-mode */ if (interface != PHY_INTERFACE_MODE_MII) { switch (speed) { case SPEED_1000: conv_speed = CONV_MODE_1000MBPS; break; case SPEED_100: conv_speed = CONV_MODE_100MBPS; break; case SPEED_10: conv_speed = CONV_MODE_10MBPS; break; default: return; } } val |= FIELD_PREP(MIIC_CONVCTRL_CONV_SPEED, conv_speed); miic_reg_rmw(miic, MIIC_CONVCTRL(port), (MIIC_CONVCTRL_CONV_SPEED | MIIC_CONVCTRL_FULLD), val); } static int miic_validate(struct phylink_pcs *pcs, unsigned long *supported, const struct phylink_link_state *state) { if (phy_interface_mode_is_rgmii(state->interface) || state->interface == PHY_INTERFACE_MODE_RMII || state->interface == PHY_INTERFACE_MODE_MII) return 1; return -EINVAL; } static const struct phylink_pcs_ops miic_phylink_ops = { .pcs_validate = miic_validate, .pcs_config = miic_config, .pcs_link_up = miic_link_up, }; struct phylink_pcs *miic_create(struct device *dev, struct device_node *np) { struct platform_device *pdev; struct miic_port *miic_port; struct device_node *pcs_np; struct miic *miic; u32 port; if (!of_device_is_available(np)) return ERR_PTR(-ENODEV); if (of_property_read_u32(np, "reg", &port)) return ERR_PTR(-EINVAL); if (port > MIIC_MAX_NR_PORTS || port < 1) return ERR_PTR(-EINVAL); /* The PCS pdev is attached to the parent node */ pcs_np = of_get_parent(np); if (!pcs_np) return ERR_PTR(-ENODEV); if (!of_device_is_available(pcs_np)) { of_node_put(pcs_np); return ERR_PTR(-ENODEV); } pdev = of_find_device_by_node(pcs_np); of_node_put(pcs_np); if (!pdev || !platform_get_drvdata(pdev)) { if (pdev) put_device(&pdev->dev); return ERR_PTR(-EPROBE_DEFER); } miic_port = kzalloc(sizeof(*miic_port), GFP_KERNEL); if (!miic_port) { put_device(&pdev->dev); return ERR_PTR(-ENOMEM); } miic = platform_get_drvdata(pdev); device_link_add(dev, miic->dev, DL_FLAG_AUTOREMOVE_CONSUMER); put_device(&pdev->dev); miic_port->miic = miic; miic_port->port = port - 1; miic_port->pcs.ops = &miic_phylink_ops; return &miic_port->pcs; } EXPORT_SYMBOL(miic_create); void miic_destroy(struct phylink_pcs *pcs) { struct miic_port *miic_port = phylink_pcs_to_miic_port(pcs); miic_converter_enable(miic_port->miic, miic_port->port, 0); kfree(miic_port); } EXPORT_SYMBOL(miic_destroy); static int miic_init_hw(struct miic *miic, u32 cfg_mode) { int port; /* Unlock write access to accessory registers (cf datasheet). 
If this * is going to be used in conjunction with the Cortex-M3, this sequence * will have to be moved in register write */ miic_reg_writel(miic, MIIC_PRCMD, 0x00A5); miic_reg_writel(miic, MIIC_PRCMD, 0x0001); miic_reg_writel(miic, MIIC_PRCMD, 0xFFFE); miic_reg_writel(miic, MIIC_PRCMD, 0x0001); miic_reg_writel(miic, MIIC_MODCTRL, FIELD_PREP(MIIC_MODCTRL_SW_MODE, cfg_mode)); for (port = 0; port < MIIC_MAX_NR_PORTS; port++) { miic_converter_enable(miic, port, 0); /* Disable speed/duplex control from these registers, datasheet * says switch registers should be used to setup switch port * speed and duplex. */ miic_reg_writel(miic, MIIC_SWCTRL, 0x0); miic_reg_writel(miic, MIIC_SWDUPC, 0x0); } return 0; } static bool miic_modctrl_match(s8 table_val[MIIC_MODCTRL_CONF_CONV_NUM], s8 dt_val[MIIC_MODCTRL_CONF_CONV_NUM]) { int i; for (i = 0; i < MIIC_MODCTRL_CONF_CONV_NUM; i++) { if (dt_val[i] == MIIC_MODCTRL_CONF_NONE) continue; if (dt_val[i] != table_val[i]) return false; } return true; } static void miic_dump_conf(struct device *dev, s8 conf[MIIC_MODCTRL_CONF_CONV_NUM]) { const char *conf_name; int i; for (i = 0; i < MIIC_MODCTRL_CONF_CONV_NUM; i++) { if (conf[i] != MIIC_MODCTRL_CONF_NONE) conf_name = conf_to_string[conf[i]]; else conf_name = "NONE"; dev_err(dev, "%s: %s\n", index_to_string[i], conf_name); } } static int miic_match_dt_conf(struct device *dev, s8 dt_val[MIIC_MODCTRL_CONF_CONV_NUM], u32 *mode_cfg) { struct modctrl_match *table_entry; int i; for (i = 0; i < ARRAY_SIZE(modctrl_match_table); i++) { table_entry = &modctrl_match_table[i]; if (miic_modctrl_match(table_entry->conv, dt_val)) { *mode_cfg = table_entry->mode_cfg; return 0; } } dev_err(dev, "Failed to apply requested configuration\n"); miic_dump_conf(dev, dt_val); return -EINVAL; } static int miic_parse_dt(struct device *dev, u32 *mode_cfg) { s8 dt_val[MIIC_MODCTRL_CONF_CONV_NUM]; struct device_node *np = dev->of_node; struct device_node *conv; u32 conf; int port; memset(dt_val, MIIC_MODCTRL_CONF_NONE, sizeof(dt_val)); if (of_property_read_u32(np, "renesas,miic-switch-portin", &conf) == 0) dt_val[0] = conf; for_each_child_of_node(np, conv) { if (of_property_read_u32(conv, "reg", &port)) continue; if (!of_device_is_available(conv)) continue; if (of_property_read_u32(conv, "renesas,miic-input", &conf) == 0) dt_val[port] = conf; } return miic_match_dt_conf(dev, dt_val, mode_cfg); } static int miic_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; struct miic *miic; u32 mode_cfg; int ret; ret = miic_parse_dt(dev, &mode_cfg); if (ret < 0) return ret; miic = devm_kzalloc(dev, sizeof(*miic), GFP_KERNEL); if (!miic) return -ENOMEM; spin_lock_init(&miic->lock); miic->dev = dev; miic->base = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(miic->base)) return PTR_ERR(miic->base); ret = devm_pm_runtime_enable(dev); if (ret < 0) return ret; ret = pm_runtime_resume_and_get(dev); if (ret < 0) return ret; ret = miic_init_hw(miic, mode_cfg); if (ret) goto disable_runtime_pm; /* miic_create() relies on that fact that data are attached to the * platform device to determine if the driver is ready so this needs to * be the last thing to be done after everything is initialized * properly. 
*/ platform_set_drvdata(pdev, miic); return 0; disable_runtime_pm: pm_runtime_put(dev); return ret; } static int miic_remove(struct platform_device *pdev) { pm_runtime_put(&pdev->dev); return 0; } static const struct of_device_id miic_of_mtable[] = { { .compatible = "renesas,rzn1-miic" }, { /* sentinel */ }, }; MODULE_DEVICE_TABLE(of, miic_of_mtable); static struct platform_driver miic_driver = { .driver = { .name = "rzn1_miic", .suppress_bind_attrs = true, .of_match_table = miic_of_mtable, }, .probe = miic_probe, .remove = miic_remove, }; module_platform_driver(miic_driver); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("Renesas MII converter PCS driver"); MODULE_AUTHOR("Clément Léger <[email protected]>");
linux-master
drivers/net/pcs/pcs-rzn1-miic.c
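The miic_modctrl_match() helper in pcs-rzn1-miic.c treats any slot set to MIIC_MODCTRL_CONF_NONE in the device-tree derived array as a wildcard and compares only the populated slots against each table row. Below is a minimal standalone sketch of that matching idea; the array size, table contents and CONF_* stand-ins are illustrative, not the driver's real values.

#include <stdbool.h>
#include <stdio.h>

#define CONV_NUM  5		/* illustrative stand-in for MIIC_MODCTRL_CONF_CONV_NUM */
#define CONF_NONE (-1)		/* illustrative stand-in for MIIC_MODCTRL_CONF_NONE */

struct match_entry {
	unsigned int mode_cfg;
	signed char conv[CONV_NUM];
};

/* Same idea as miic_modctrl_match(): unspecified slots match anything */
static bool conf_match(const signed char *table_val, const signed char *dt_val)
{
	for (int i = 0; i < CONV_NUM; i++) {
		if (dt_val[i] == CONF_NONE)
			continue;
		if (dt_val[i] != table_val[i])
			return false;
	}
	return true;
}

int main(void)
{
	static const struct match_entry table[] = {
		{ .mode_cfg = 0x1, .conv = { 0, 1, 1, 2, 2 } },
		{ .mode_cfg = 0x2, .conv = { 0, 1, 2, 2, 2 } },
	};
	signed char requested[CONV_NUM] = { 0, CONF_NONE, 2, CONF_NONE, 2 };

	for (unsigned int i = 0; i < sizeof(table) / sizeof(table[0]); i++) {
		if (conf_match(table[i].conv, requested)) {
			printf("matched mode_cfg 0x%x\n", table[i].mode_cfg);
			return 0;
		}
	}

	printf("no match\n");
	return 1;
}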
// SPDX-License-Identifier: GPL-2.0 /* * Copyright (c) 2020 Synopsys, Inc. and/or its affiliates. * Synopsys DesignWare XPCS helpers * * Author: Jose Abreu <[email protected]> */ #include <linux/delay.h> #include <linux/pcs/pcs-xpcs.h> #include <linux/mdio.h> #include <linux/phylink.h> #include <linux/workqueue.h> #include "pcs-xpcs.h" #define phylink_pcs_to_xpcs(pl_pcs) \ container_of((pl_pcs), struct dw_xpcs, pcs) static const int xpcs_usxgmii_features[] = { ETHTOOL_LINK_MODE_Pause_BIT, ETHTOOL_LINK_MODE_Asym_Pause_BIT, ETHTOOL_LINK_MODE_Autoneg_BIT, ETHTOOL_LINK_MODE_1000baseKX_Full_BIT, ETHTOOL_LINK_MODE_10000baseKX4_Full_BIT, ETHTOOL_LINK_MODE_10000baseKR_Full_BIT, ETHTOOL_LINK_MODE_2500baseX_Full_BIT, __ETHTOOL_LINK_MODE_MASK_NBITS, }; static const int xpcs_10gkr_features[] = { ETHTOOL_LINK_MODE_Pause_BIT, ETHTOOL_LINK_MODE_Asym_Pause_BIT, ETHTOOL_LINK_MODE_10000baseKR_Full_BIT, __ETHTOOL_LINK_MODE_MASK_NBITS, }; static const int xpcs_xlgmii_features[] = { ETHTOOL_LINK_MODE_Pause_BIT, ETHTOOL_LINK_MODE_Asym_Pause_BIT, ETHTOOL_LINK_MODE_25000baseCR_Full_BIT, ETHTOOL_LINK_MODE_25000baseKR_Full_BIT, ETHTOOL_LINK_MODE_25000baseSR_Full_BIT, ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT, ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT, ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT, ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT, ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT, ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT, ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT, ETHTOOL_LINK_MODE_50000baseKR_Full_BIT, ETHTOOL_LINK_MODE_50000baseSR_Full_BIT, ETHTOOL_LINK_MODE_50000baseCR_Full_BIT, ETHTOOL_LINK_MODE_50000baseLR_ER_FR_Full_BIT, ETHTOOL_LINK_MODE_50000baseDR_Full_BIT, ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT, ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT, ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT, ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT, ETHTOOL_LINK_MODE_100000baseKR2_Full_BIT, ETHTOOL_LINK_MODE_100000baseSR2_Full_BIT, ETHTOOL_LINK_MODE_100000baseCR2_Full_BIT, ETHTOOL_LINK_MODE_100000baseLR2_ER2_FR2_Full_BIT, ETHTOOL_LINK_MODE_100000baseDR2_Full_BIT, __ETHTOOL_LINK_MODE_MASK_NBITS, }; static const int xpcs_10gbaser_features[] = { ETHTOOL_LINK_MODE_Pause_BIT, ETHTOOL_LINK_MODE_Asym_Pause_BIT, ETHTOOL_LINK_MODE_10000baseSR_Full_BIT, ETHTOOL_LINK_MODE_10000baseLR_Full_BIT, ETHTOOL_LINK_MODE_10000baseLRM_Full_BIT, ETHTOOL_LINK_MODE_10000baseER_Full_BIT, __ETHTOOL_LINK_MODE_MASK_NBITS, }; static const int xpcs_sgmii_features[] = { ETHTOOL_LINK_MODE_Pause_BIT, ETHTOOL_LINK_MODE_Asym_Pause_BIT, ETHTOOL_LINK_MODE_Autoneg_BIT, ETHTOOL_LINK_MODE_10baseT_Half_BIT, ETHTOOL_LINK_MODE_10baseT_Full_BIT, ETHTOOL_LINK_MODE_100baseT_Half_BIT, ETHTOOL_LINK_MODE_100baseT_Full_BIT, ETHTOOL_LINK_MODE_1000baseT_Half_BIT, ETHTOOL_LINK_MODE_1000baseT_Full_BIT, __ETHTOOL_LINK_MODE_MASK_NBITS, }; static const int xpcs_1000basex_features[] = { ETHTOOL_LINK_MODE_Pause_BIT, ETHTOOL_LINK_MODE_Asym_Pause_BIT, ETHTOOL_LINK_MODE_Autoneg_BIT, ETHTOOL_LINK_MODE_1000baseX_Full_BIT, __ETHTOOL_LINK_MODE_MASK_NBITS, }; static const int xpcs_2500basex_features[] = { ETHTOOL_LINK_MODE_Pause_BIT, ETHTOOL_LINK_MODE_Asym_Pause_BIT, ETHTOOL_LINK_MODE_Autoneg_BIT, ETHTOOL_LINK_MODE_2500baseX_Full_BIT, ETHTOOL_LINK_MODE_2500baseT_Full_BIT, __ETHTOOL_LINK_MODE_MASK_NBITS, }; static const phy_interface_t xpcs_usxgmii_interfaces[] = { PHY_INTERFACE_MODE_USXGMII, }; static const phy_interface_t xpcs_10gkr_interfaces[] = { PHY_INTERFACE_MODE_10GKR, }; static const phy_interface_t xpcs_xlgmii_interfaces[] = { PHY_INTERFACE_MODE_XLGMII, }; static const phy_interface_t 
xpcs_10gbaser_interfaces[] = { PHY_INTERFACE_MODE_10GBASER, }; static const phy_interface_t xpcs_sgmii_interfaces[] = { PHY_INTERFACE_MODE_SGMII, }; static const phy_interface_t xpcs_1000basex_interfaces[] = { PHY_INTERFACE_MODE_1000BASEX, }; static const phy_interface_t xpcs_2500basex_interfaces[] = { PHY_INTERFACE_MODE_2500BASEX, PHY_INTERFACE_MODE_MAX, }; enum { DW_XPCS_USXGMII, DW_XPCS_10GKR, DW_XPCS_XLGMII, DW_XPCS_10GBASER, DW_XPCS_SGMII, DW_XPCS_1000BASEX, DW_XPCS_2500BASEX, DW_XPCS_INTERFACE_MAX, }; struct xpcs_compat { const int *supported; const phy_interface_t *interface; int num_interfaces; int an_mode; int (*pma_config)(struct dw_xpcs *xpcs); }; struct xpcs_id { u32 id; u32 mask; const struct xpcs_compat *compat; }; static const struct xpcs_compat *xpcs_find_compat(const struct xpcs_id *id, phy_interface_t interface) { int i, j; for (i = 0; i < DW_XPCS_INTERFACE_MAX; i++) { const struct xpcs_compat *compat = &id->compat[i]; for (j = 0; j < compat->num_interfaces; j++) if (compat->interface[j] == interface) return compat; } return NULL; } int xpcs_get_an_mode(struct dw_xpcs *xpcs, phy_interface_t interface) { const struct xpcs_compat *compat; compat = xpcs_find_compat(xpcs->id, interface); if (!compat) return -ENODEV; return compat->an_mode; } EXPORT_SYMBOL_GPL(xpcs_get_an_mode); static bool __xpcs_linkmode_supported(const struct xpcs_compat *compat, enum ethtool_link_mode_bit_indices linkmode) { int i; for (i = 0; compat->supported[i] != __ETHTOOL_LINK_MODE_MASK_NBITS; i++) if (compat->supported[i] == linkmode) return true; return false; } #define xpcs_linkmode_supported(compat, mode) \ __xpcs_linkmode_supported(compat, ETHTOOL_LINK_MODE_ ## mode ## _BIT) int xpcs_read(struct dw_xpcs *xpcs, int dev, u32 reg) { return mdiodev_c45_read(xpcs->mdiodev, dev, reg); } int xpcs_write(struct dw_xpcs *xpcs, int dev, u32 reg, u16 val) { return mdiodev_c45_write(xpcs->mdiodev, dev, reg, val); } static int xpcs_modify_changed(struct dw_xpcs *xpcs, int dev, u32 reg, u16 mask, u16 set) { return mdiodev_c45_modify_changed(xpcs->mdiodev, dev, reg, mask, set); } static int xpcs_read_vendor(struct dw_xpcs *xpcs, int dev, u32 reg) { return xpcs_read(xpcs, dev, DW_VENDOR | reg); } static int xpcs_write_vendor(struct dw_xpcs *xpcs, int dev, int reg, u16 val) { return xpcs_write(xpcs, dev, DW_VENDOR | reg, val); } int xpcs_read_vpcs(struct dw_xpcs *xpcs, int reg) { return xpcs_read_vendor(xpcs, MDIO_MMD_PCS, reg); } int xpcs_write_vpcs(struct dw_xpcs *xpcs, int reg, u16 val) { return xpcs_write_vendor(xpcs, MDIO_MMD_PCS, reg, val); } static int xpcs_dev_flag(struct dw_xpcs *xpcs) { int ret, oui; ret = xpcs_read(xpcs, MDIO_MMD_PMAPMD, MDIO_DEVID1); if (ret < 0) return ret; oui = ret; ret = xpcs_read(xpcs, MDIO_MMD_PMAPMD, MDIO_DEVID2); if (ret < 0) return ret; ret = (ret >> 10) & 0x3F; oui |= ret << 16; if (oui == DW_OUI_WX) xpcs->dev_flag = DW_DEV_TXGBE; return 0; } static int xpcs_poll_reset(struct dw_xpcs *xpcs, int dev) { /* Poll until the reset bit clears (50ms per retry == 0.6 sec) */ unsigned int retries = 12; int ret; do { msleep(50); ret = xpcs_read(xpcs, dev, MDIO_CTRL1); if (ret < 0) return ret; } while (ret & MDIO_CTRL1_RESET && --retries); return (ret & MDIO_CTRL1_RESET) ? 
-ETIMEDOUT : 0; } static int xpcs_soft_reset(struct dw_xpcs *xpcs, const struct xpcs_compat *compat) { int ret, dev; switch (compat->an_mode) { case DW_AN_C73: case DW_10GBASER: dev = MDIO_MMD_PCS; break; case DW_AN_C37_SGMII: case DW_2500BASEX: case DW_AN_C37_1000BASEX: dev = MDIO_MMD_VEND2; break; default: return -1; } ret = xpcs_write(xpcs, dev, MDIO_CTRL1, MDIO_CTRL1_RESET); if (ret < 0) return ret; return xpcs_poll_reset(xpcs, dev); } #define xpcs_warn(__xpcs, __state, __args...) \ ({ \ if ((__state)->link) \ dev_warn(&(__xpcs)->mdiodev->dev, ##__args); \ }) static int xpcs_read_fault_c73(struct dw_xpcs *xpcs, struct phylink_link_state *state, u16 pcs_stat1) { int ret; if (pcs_stat1 & MDIO_STAT1_FAULT) { xpcs_warn(xpcs, state, "Link fault condition detected!\n"); return -EFAULT; } ret = xpcs_read(xpcs, MDIO_MMD_PCS, MDIO_STAT2); if (ret < 0) return ret; if (ret & MDIO_STAT2_RXFAULT) xpcs_warn(xpcs, state, "Receiver fault detected!\n"); if (ret & MDIO_STAT2_TXFAULT) xpcs_warn(xpcs, state, "Transmitter fault detected!\n"); ret = xpcs_read_vendor(xpcs, MDIO_MMD_PCS, DW_VR_XS_PCS_DIG_STS); if (ret < 0) return ret; if (ret & DW_RXFIFO_ERR) { xpcs_warn(xpcs, state, "FIFO fault condition detected!\n"); return -EFAULT; } ret = xpcs_read(xpcs, MDIO_MMD_PCS, MDIO_PCS_10GBRT_STAT1); if (ret < 0) return ret; if (!(ret & MDIO_PCS_10GBRT_STAT1_BLKLK)) xpcs_warn(xpcs, state, "Link is not locked!\n"); ret = xpcs_read(xpcs, MDIO_MMD_PCS, MDIO_PCS_10GBRT_STAT2); if (ret < 0) return ret; if (ret & MDIO_PCS_10GBRT_STAT2_ERR) { xpcs_warn(xpcs, state, "Link has errors!\n"); return -EFAULT; } return 0; } static void xpcs_config_usxgmii(struct dw_xpcs *xpcs, int speed) { int ret, speed_sel; switch (speed) { case SPEED_10: speed_sel = DW_USXGMII_10; break; case SPEED_100: speed_sel = DW_USXGMII_100; break; case SPEED_1000: speed_sel = DW_USXGMII_1000; break; case SPEED_2500: speed_sel = DW_USXGMII_2500; break; case SPEED_5000: speed_sel = DW_USXGMII_5000; break; case SPEED_10000: speed_sel = DW_USXGMII_10000; break; default: /* Nothing to do here */ return; } ret = xpcs_read_vpcs(xpcs, MDIO_CTRL1); if (ret < 0) goto out; ret = xpcs_write_vpcs(xpcs, MDIO_CTRL1, ret | DW_USXGMII_EN); if (ret < 0) goto out; ret = xpcs_read(xpcs, MDIO_MMD_VEND2, MDIO_CTRL1); if (ret < 0) goto out; ret &= ~DW_USXGMII_SS_MASK; ret |= speed_sel | DW_USXGMII_FULL; ret = xpcs_write(xpcs, MDIO_MMD_VEND2, MDIO_CTRL1, ret); if (ret < 0) goto out; ret = xpcs_read_vpcs(xpcs, MDIO_CTRL1); if (ret < 0) goto out; ret = xpcs_write_vpcs(xpcs, MDIO_CTRL1, ret | DW_USXGMII_RST); if (ret < 0) goto out; return; out: pr_err("%s: XPCS access returned %pe\n", __func__, ERR_PTR(ret)); } static int _xpcs_config_aneg_c73(struct dw_xpcs *xpcs, const struct xpcs_compat *compat) { int ret, adv; /* By default, in USXGMII mode XPCS operates at 10G baud and * replicates data to achieve lower speeds. Hereby, in this * default configuration we need to advertise all supported * modes and not only the ones we want to use. 
*/ /* SR_AN_ADV3 */ adv = 0; if (xpcs_linkmode_supported(compat, 2500baseX_Full)) adv |= DW_C73_2500KX; /* TODO: 5000baseKR */ ret = xpcs_write(xpcs, MDIO_MMD_AN, DW_SR_AN_ADV3, adv); if (ret < 0) return ret; /* SR_AN_ADV2 */ adv = 0; if (xpcs_linkmode_supported(compat, 1000baseKX_Full)) adv |= DW_C73_1000KX; if (xpcs_linkmode_supported(compat, 10000baseKX4_Full)) adv |= DW_C73_10000KX4; if (xpcs_linkmode_supported(compat, 10000baseKR_Full)) adv |= DW_C73_10000KR; ret = xpcs_write(xpcs, MDIO_MMD_AN, DW_SR_AN_ADV2, adv); if (ret < 0) return ret; /* SR_AN_ADV1 */ adv = DW_C73_AN_ADV_SF; if (xpcs_linkmode_supported(compat, Pause)) adv |= DW_C73_PAUSE; if (xpcs_linkmode_supported(compat, Asym_Pause)) adv |= DW_C73_ASYM_PAUSE; return xpcs_write(xpcs, MDIO_MMD_AN, DW_SR_AN_ADV1, adv); } static int xpcs_config_aneg_c73(struct dw_xpcs *xpcs, const struct xpcs_compat *compat) { int ret; ret = _xpcs_config_aneg_c73(xpcs, compat); if (ret < 0) return ret; ret = xpcs_read(xpcs, MDIO_MMD_AN, MDIO_CTRL1); if (ret < 0) return ret; ret |= MDIO_AN_CTRL1_ENABLE | MDIO_AN_CTRL1_RESTART; return xpcs_write(xpcs, MDIO_MMD_AN, MDIO_CTRL1, ret); } static int xpcs_aneg_done_c73(struct dw_xpcs *xpcs, struct phylink_link_state *state, const struct xpcs_compat *compat, u16 an_stat1) { int ret; if (an_stat1 & MDIO_AN_STAT1_COMPLETE) { ret = xpcs_read(xpcs, MDIO_MMD_AN, MDIO_AN_LPA); if (ret < 0) return ret; /* Check if Aneg outcome is valid */ if (!(ret & DW_C73_AN_ADV_SF)) { xpcs_config_aneg_c73(xpcs, compat); return 0; } return 1; } return 0; } static int xpcs_read_lpa_c73(struct dw_xpcs *xpcs, struct phylink_link_state *state, u16 an_stat1) { u16 lpa[3]; int i, ret; if (!(an_stat1 & MDIO_AN_STAT1_LPABLE)) { phylink_clear(state->lp_advertising, Autoneg); return 0; } phylink_set(state->lp_advertising, Autoneg); /* Read Clause 73 link partner advertisement */ for (i = ARRAY_SIZE(lpa); --i >= 0; ) { ret = xpcs_read(xpcs, MDIO_MMD_AN, MDIO_AN_LPA + i); if (ret < 0) return ret; lpa[i] = ret; } mii_c73_mod_linkmode(state->lp_advertising, lpa); return 0; } static int xpcs_get_max_xlgmii_speed(struct dw_xpcs *xpcs, struct phylink_link_state *state) { unsigned long *adv = state->advertising; int speed = SPEED_UNKNOWN; int bit; for_each_set_bit(bit, adv, __ETHTOOL_LINK_MODE_MASK_NBITS) { int new_speed = SPEED_UNKNOWN; switch (bit) { case ETHTOOL_LINK_MODE_25000baseCR_Full_BIT: case ETHTOOL_LINK_MODE_25000baseKR_Full_BIT: case ETHTOOL_LINK_MODE_25000baseSR_Full_BIT: new_speed = SPEED_25000; break; case ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT: case ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT: case ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT: case ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT: new_speed = SPEED_40000; break; case ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT: case ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT: case ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT: case ETHTOOL_LINK_MODE_50000baseKR_Full_BIT: case ETHTOOL_LINK_MODE_50000baseSR_Full_BIT: case ETHTOOL_LINK_MODE_50000baseCR_Full_BIT: case ETHTOOL_LINK_MODE_50000baseLR_ER_FR_Full_BIT: case ETHTOOL_LINK_MODE_50000baseDR_Full_BIT: new_speed = SPEED_50000; break; case ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT: case ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT: case ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT: case ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT: case ETHTOOL_LINK_MODE_100000baseKR2_Full_BIT: case ETHTOOL_LINK_MODE_100000baseSR2_Full_BIT: case ETHTOOL_LINK_MODE_100000baseCR2_Full_BIT: case ETHTOOL_LINK_MODE_100000baseLR2_ER2_FR2_Full_BIT: case 
ETHTOOL_LINK_MODE_100000baseDR2_Full_BIT: new_speed = SPEED_100000; break; default: continue; } if (new_speed > speed) speed = new_speed; } return speed; } static void xpcs_resolve_pma(struct dw_xpcs *xpcs, struct phylink_link_state *state) { state->pause = MLO_PAUSE_TX | MLO_PAUSE_RX; state->duplex = DUPLEX_FULL; switch (state->interface) { case PHY_INTERFACE_MODE_10GKR: state->speed = SPEED_10000; break; case PHY_INTERFACE_MODE_XLGMII: state->speed = xpcs_get_max_xlgmii_speed(xpcs, state); break; default: state->speed = SPEED_UNKNOWN; break; } } static int xpcs_validate(struct phylink_pcs *pcs, unsigned long *supported, const struct phylink_link_state *state) { __ETHTOOL_DECLARE_LINK_MODE_MASK(xpcs_supported) = { 0, }; const struct xpcs_compat *compat; struct dw_xpcs *xpcs; int i; xpcs = phylink_pcs_to_xpcs(pcs); compat = xpcs_find_compat(xpcs->id, state->interface); /* Populate the supported link modes for this PHY interface type. * FIXME: what about the port modes and autoneg bit? This masks * all those away. */ if (compat) for (i = 0; compat->supported[i] != __ETHTOOL_LINK_MODE_MASK_NBITS; i++) set_bit(compat->supported[i], xpcs_supported); linkmode_and(supported, supported, xpcs_supported); return 0; } void xpcs_get_interfaces(struct dw_xpcs *xpcs, unsigned long *interfaces) { int i, j; for (i = 0; i < DW_XPCS_INTERFACE_MAX; i++) { const struct xpcs_compat *compat = &xpcs->id->compat[i]; for (j = 0; j < compat->num_interfaces; j++) if (compat->interface[j] < PHY_INTERFACE_MODE_MAX) __set_bit(compat->interface[j], interfaces); } } EXPORT_SYMBOL_GPL(xpcs_get_interfaces); int xpcs_config_eee(struct dw_xpcs *xpcs, int mult_fact_100ns, int enable) { int ret; ret = xpcs_read(xpcs, MDIO_MMD_VEND2, DW_VR_MII_EEE_MCTRL0); if (ret < 0) return ret; if (enable) { /* Enable EEE */ ret = DW_VR_MII_EEE_LTX_EN | DW_VR_MII_EEE_LRX_EN | DW_VR_MII_EEE_TX_QUIET_EN | DW_VR_MII_EEE_RX_QUIET_EN | DW_VR_MII_EEE_TX_EN_CTRL | DW_VR_MII_EEE_RX_EN_CTRL | mult_fact_100ns << DW_VR_MII_EEE_MULT_FACT_100NS_SHIFT; } else { ret &= ~(DW_VR_MII_EEE_LTX_EN | DW_VR_MII_EEE_LRX_EN | DW_VR_MII_EEE_TX_QUIET_EN | DW_VR_MII_EEE_RX_QUIET_EN | DW_VR_MII_EEE_TX_EN_CTRL | DW_VR_MII_EEE_RX_EN_CTRL | DW_VR_MII_EEE_MULT_FACT_100NS); } ret = xpcs_write(xpcs, MDIO_MMD_VEND2, DW_VR_MII_EEE_MCTRL0, ret); if (ret < 0) return ret; ret = xpcs_read(xpcs, MDIO_MMD_VEND2, DW_VR_MII_EEE_MCTRL1); if (ret < 0) return ret; if (enable) ret |= DW_VR_MII_EEE_TRN_LPI; else ret &= ~DW_VR_MII_EEE_TRN_LPI; return xpcs_write(xpcs, MDIO_MMD_VEND2, DW_VR_MII_EEE_MCTRL1, ret); } EXPORT_SYMBOL_GPL(xpcs_config_eee); static int xpcs_config_aneg_c37_sgmii(struct dw_xpcs *xpcs, unsigned int neg_mode) { int ret, mdio_ctrl, tx_conf; if (xpcs->dev_flag == DW_DEV_TXGBE) xpcs_write_vpcs(xpcs, DW_VR_XS_PCS_DIG_CTRL1, DW_CL37_BP | DW_EN_VSMMD1); /* For AN for C37 SGMII mode, the settings are :- * 1) VR_MII_MMD_CTRL Bit(12) [AN_ENABLE] = 0b (Disable SGMII AN in case it is already enabled) * 2) VR_MII_AN_CTRL Bit(2:1)[PCS_MODE] = 10b (SGMII AN) * 3) VR_MII_AN_CTRL Bit(3) [TX_CONFIG] = 0b (MAC side SGMII) * DW xPCS used with DW EQoS MAC is always MAC side SGMII. * 4) VR_MII_DIG_CTRL1 Bit(9) [MAC_AUTO_SW] = 1b (Automatic * speed/duplex mode change by HW after SGMII AN complete) * 5) VR_MII_MMD_CTRL Bit(12) [AN_ENABLE] = 1b (Enable SGMII AN) * * Note: Since it is MAC side SGMII, there is no need to set * SR_MII_AN_ADV. MAC side SGMII receives AN Tx Config from * PHY about the link state change after C28 AN is completed * between PHY and Link Partner. 
There is also no need to * trigger AN restart for MAC-side SGMII. */ mdio_ctrl = xpcs_read(xpcs, MDIO_MMD_VEND2, DW_VR_MII_MMD_CTRL); if (mdio_ctrl < 0) return mdio_ctrl; if (mdio_ctrl & AN_CL37_EN) { ret = xpcs_write(xpcs, MDIO_MMD_VEND2, DW_VR_MII_MMD_CTRL, mdio_ctrl & ~AN_CL37_EN); if (ret < 0) return ret; } ret = xpcs_read(xpcs, MDIO_MMD_VEND2, DW_VR_MII_AN_CTRL); if (ret < 0) return ret; ret &= ~(DW_VR_MII_PCS_MODE_MASK | DW_VR_MII_TX_CONFIG_MASK); ret |= (DW_VR_MII_PCS_MODE_C37_SGMII << DW_VR_MII_AN_CTRL_PCS_MODE_SHIFT & DW_VR_MII_PCS_MODE_MASK); if (xpcs->dev_flag == DW_DEV_TXGBE) { ret |= DW_VR_MII_AN_CTRL_8BIT; /* Hardware requires it to be PHY side SGMII */ tx_conf = DW_VR_MII_TX_CONFIG_PHY_SIDE_SGMII; } else { tx_conf = DW_VR_MII_TX_CONFIG_MAC_SIDE_SGMII; } ret |= tx_conf << DW_VR_MII_AN_CTRL_TX_CONFIG_SHIFT & DW_VR_MII_TX_CONFIG_MASK; ret = xpcs_write(xpcs, MDIO_MMD_VEND2, DW_VR_MII_AN_CTRL, ret); if (ret < 0) return ret; ret = xpcs_read(xpcs, MDIO_MMD_VEND2, DW_VR_MII_DIG_CTRL1); if (ret < 0) return ret; if (neg_mode == PHYLINK_PCS_NEG_INBAND_ENABLED) ret |= DW_VR_MII_DIG_CTRL1_MAC_AUTO_SW; else ret &= ~DW_VR_MII_DIG_CTRL1_MAC_AUTO_SW; if (xpcs->dev_flag == DW_DEV_TXGBE) ret |= DW_VR_MII_DIG_CTRL1_PHY_MODE_CTRL; ret = xpcs_write(xpcs, MDIO_MMD_VEND2, DW_VR_MII_DIG_CTRL1, ret); if (ret < 0) return ret; if (neg_mode == PHYLINK_PCS_NEG_INBAND_ENABLED) ret = xpcs_write(xpcs, MDIO_MMD_VEND2, DW_VR_MII_MMD_CTRL, mdio_ctrl | AN_CL37_EN); return ret; } static int xpcs_config_aneg_c37_1000basex(struct dw_xpcs *xpcs, unsigned int neg_mode, const unsigned long *advertising) { phy_interface_t interface = PHY_INTERFACE_MODE_1000BASEX; int ret, mdio_ctrl, adv; bool changed = 0; if (xpcs->dev_flag == DW_DEV_TXGBE) xpcs_write_vpcs(xpcs, DW_VR_XS_PCS_DIG_CTRL1, DW_CL37_BP | DW_EN_VSMMD1); /* According to Chap 7.12, to set 1000BASE-X C37 AN, AN must * be disabled first:- * 1) VR_MII_MMD_CTRL Bit(12)[AN_ENABLE] = 0b * 2) VR_MII_AN_CTRL Bit(2:1)[PCS_MODE] = 00b (1000BASE-X C37) */ mdio_ctrl = xpcs_read(xpcs, MDIO_MMD_VEND2, DW_VR_MII_MMD_CTRL); if (mdio_ctrl < 0) return mdio_ctrl; if (mdio_ctrl & AN_CL37_EN) { ret = xpcs_write(xpcs, MDIO_MMD_VEND2, DW_VR_MII_MMD_CTRL, mdio_ctrl & ~AN_CL37_EN); if (ret < 0) return ret; } ret = xpcs_read(xpcs, MDIO_MMD_VEND2, DW_VR_MII_AN_CTRL); if (ret < 0) return ret; ret &= ~DW_VR_MII_PCS_MODE_MASK; if (!xpcs->pcs.poll) ret |= DW_VR_MII_AN_INTR_EN; ret = xpcs_write(xpcs, MDIO_MMD_VEND2, DW_VR_MII_AN_CTRL, ret); if (ret < 0) return ret; /* Check for advertising changes and update the C45 MII ADV * register accordingly. 
*/ adv = phylink_mii_c22_pcs_encode_advertisement(interface, advertising); if (adv >= 0) { ret = xpcs_modify_changed(xpcs, MDIO_MMD_VEND2, MII_ADVERTISE, 0xffff, adv); if (ret < 0) return ret; changed = ret; } /* Clear CL37 AN complete status */ ret = xpcs_write(xpcs, MDIO_MMD_VEND2, DW_VR_MII_AN_INTR_STS, 0); if (ret < 0) return ret; if (neg_mode == PHYLINK_PCS_NEG_INBAND_ENABLED) { ret = xpcs_write(xpcs, MDIO_MMD_VEND2, DW_VR_MII_MMD_CTRL, mdio_ctrl | AN_CL37_EN); if (ret < 0) return ret; } return changed; } static int xpcs_config_2500basex(struct dw_xpcs *xpcs) { int ret; ret = xpcs_read(xpcs, MDIO_MMD_VEND2, DW_VR_MII_DIG_CTRL1); if (ret < 0) return ret; ret |= DW_VR_MII_DIG_CTRL1_2G5_EN; ret &= ~DW_VR_MII_DIG_CTRL1_MAC_AUTO_SW; ret = xpcs_write(xpcs, MDIO_MMD_VEND2, DW_VR_MII_DIG_CTRL1, ret); if (ret < 0) return ret; ret = xpcs_read(xpcs, MDIO_MMD_VEND2, DW_VR_MII_MMD_CTRL); if (ret < 0) return ret; ret &= ~AN_CL37_EN; ret |= SGMII_SPEED_SS6; ret &= ~SGMII_SPEED_SS13; return xpcs_write(xpcs, MDIO_MMD_VEND2, DW_VR_MII_MMD_CTRL, ret); } int xpcs_do_config(struct dw_xpcs *xpcs, phy_interface_t interface, const unsigned long *advertising, unsigned int neg_mode) { const struct xpcs_compat *compat; int ret; compat = xpcs_find_compat(xpcs->id, interface); if (!compat) return -ENODEV; if (xpcs->dev_flag == DW_DEV_TXGBE) { ret = txgbe_xpcs_switch_mode(xpcs, interface); if (ret) return ret; } switch (compat->an_mode) { case DW_10GBASER: break; case DW_AN_C73: if (neg_mode == PHYLINK_PCS_NEG_INBAND_ENABLED) { ret = xpcs_config_aneg_c73(xpcs, compat); if (ret) return ret; } break; case DW_AN_C37_SGMII: ret = xpcs_config_aneg_c37_sgmii(xpcs, neg_mode); if (ret) return ret; break; case DW_AN_C37_1000BASEX: ret = xpcs_config_aneg_c37_1000basex(xpcs, neg_mode, advertising); if (ret) return ret; break; case DW_2500BASEX: ret = xpcs_config_2500basex(xpcs); if (ret) return ret; break; default: return -1; } if (compat->pma_config) { ret = compat->pma_config(xpcs); if (ret) return ret; } return 0; } EXPORT_SYMBOL_GPL(xpcs_do_config); static int xpcs_config(struct phylink_pcs *pcs, unsigned int neg_mode, phy_interface_t interface, const unsigned long *advertising, bool permit_pause_to_mac) { struct dw_xpcs *xpcs = phylink_pcs_to_xpcs(pcs); return xpcs_do_config(xpcs, interface, advertising, neg_mode); } static int xpcs_get_state_c73(struct dw_xpcs *xpcs, struct phylink_link_state *state, const struct xpcs_compat *compat) { bool an_enabled; int pcs_stat1; int an_stat1; int ret; /* The link status bit is latching-low, so it is important to * avoid unnecessary re-reads of this register to avoid missing * a link-down event. */ pcs_stat1 = xpcs_read(xpcs, MDIO_MMD_PCS, MDIO_STAT1); if (pcs_stat1 < 0) { state->link = false; return pcs_stat1; } /* Link needs to be read first ... */ state->link = !!(pcs_stat1 & MDIO_STAT1_LSTATUS); /* ... and then we check the faults. */ ret = xpcs_read_fault_c73(xpcs, state, pcs_stat1); if (ret) { ret = xpcs_soft_reset(xpcs, compat); if (ret) return ret; state->link = 0; return xpcs_do_config(xpcs, state->interface, NULL, PHYLINK_PCS_NEG_INBAND_ENABLED); } /* There is no point doing anything else if the link is down. */ if (!state->link) return 0; an_enabled = linkmode_test_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, state->advertising); if (an_enabled) { /* The link status bit is latching-low, so it is important to * avoid unnecessary re-reads of this register to avoid missing * a link-down event. 
*/ an_stat1 = xpcs_read(xpcs, MDIO_MMD_AN, MDIO_STAT1); if (an_stat1 < 0) { state->link = false; return an_stat1; } state->an_complete = xpcs_aneg_done_c73(xpcs, state, compat, an_stat1); if (!state->an_complete) { state->link = false; return 0; } ret = xpcs_read_lpa_c73(xpcs, state, an_stat1); if (ret < 0) { state->link = false; return ret; } phylink_resolve_c73(state); } else { xpcs_resolve_pma(xpcs, state); } return 0; } static int xpcs_get_state_c37_sgmii(struct dw_xpcs *xpcs, struct phylink_link_state *state) { int ret; /* Reset link_state */ state->link = false; state->speed = SPEED_UNKNOWN; state->duplex = DUPLEX_UNKNOWN; state->pause = 0; /* For C37 SGMII mode, we check DW_VR_MII_AN_INTR_STS for link * status, speed and duplex. */ ret = xpcs_read(xpcs, MDIO_MMD_VEND2, DW_VR_MII_AN_INTR_STS); if (ret < 0) return ret; if (ret & DW_VR_MII_C37_ANSGM_SP_LNKSTS) { int speed_value; state->link = true; speed_value = (ret & DW_VR_MII_AN_STS_C37_ANSGM_SP) >> DW_VR_MII_AN_STS_C37_ANSGM_SP_SHIFT; if (speed_value == DW_VR_MII_C37_ANSGM_SP_1000) state->speed = SPEED_1000; else if (speed_value == DW_VR_MII_C37_ANSGM_SP_100) state->speed = SPEED_100; else state->speed = SPEED_10; if (ret & DW_VR_MII_AN_STS_C37_ANSGM_FD) state->duplex = DUPLEX_FULL; else state->duplex = DUPLEX_HALF; } else if (ret == DW_VR_MII_AN_STS_C37_ANCMPLT_INTR) { int speed, duplex; state->link = true; speed = xpcs_read(xpcs, MDIO_MMD_VEND2, MDIO_CTRL1); if (speed < 0) return speed; speed &= SGMII_SPEED_SS13 | SGMII_SPEED_SS6; if (speed == SGMII_SPEED_SS6) state->speed = SPEED_1000; else if (speed == SGMII_SPEED_SS13) state->speed = SPEED_100; else if (speed == 0) state->speed = SPEED_10; duplex = xpcs_read(xpcs, MDIO_MMD_VEND2, MII_ADVERTISE); if (duplex < 0) return duplex; if (duplex & DW_FULL_DUPLEX) state->duplex = DUPLEX_FULL; else if (duplex & DW_HALF_DUPLEX) state->duplex = DUPLEX_HALF; xpcs_write(xpcs, MDIO_MMD_VEND2, DW_VR_MII_AN_INTR_STS, 0); } return 0; } static int xpcs_get_state_c37_1000basex(struct dw_xpcs *xpcs, struct phylink_link_state *state) { int lpa, bmsr; if (linkmode_test_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, state->advertising)) { /* Reset link state */ state->link = false; lpa = xpcs_read(xpcs, MDIO_MMD_VEND2, MII_LPA); if (lpa < 0 || lpa & LPA_RFAULT) return lpa; bmsr = xpcs_read(xpcs, MDIO_MMD_VEND2, MII_BMSR); if (bmsr < 0) return bmsr; /* Clear AN complete interrupt */ if (!xpcs->pcs.poll) { int an_intr; an_intr = xpcs_read(xpcs, MDIO_MMD_VEND2, DW_VR_MII_AN_INTR_STS); if (an_intr & DW_VR_MII_AN_STS_C37_ANCMPLT_INTR) { an_intr &= ~DW_VR_MII_AN_STS_C37_ANCMPLT_INTR; xpcs_write(xpcs, MDIO_MMD_VEND2, DW_VR_MII_AN_INTR_STS, an_intr); } } phylink_mii_c22_pcs_decode_state(state, bmsr, lpa); } return 0; } static void xpcs_get_state(struct phylink_pcs *pcs, struct phylink_link_state *state) { struct dw_xpcs *xpcs = phylink_pcs_to_xpcs(pcs); const struct xpcs_compat *compat; int ret; compat = xpcs_find_compat(xpcs->id, state->interface); if (!compat) return; switch (compat->an_mode) { case DW_10GBASER: phylink_mii_c45_pcs_get_state(xpcs->mdiodev, state); break; case DW_AN_C73: ret = xpcs_get_state_c73(xpcs, state, compat); if (ret) { pr_err("xpcs_get_state_c73 returned %pe\n", ERR_PTR(ret)); return; } break; case DW_AN_C37_SGMII: ret = xpcs_get_state_c37_sgmii(xpcs, state); if (ret) { pr_err("xpcs_get_state_c37_sgmii returned %pe\n", ERR_PTR(ret)); } break; case DW_AN_C37_1000BASEX: ret = xpcs_get_state_c37_1000basex(xpcs, state); if (ret) { pr_err("xpcs_get_state_c37_1000basex returned %pe\n", ERR_PTR(ret)); 
} break; default: return; } } static void xpcs_link_up_sgmii(struct dw_xpcs *xpcs, unsigned int neg_mode, int speed, int duplex) { int val, ret; if (neg_mode == PHYLINK_PCS_NEG_INBAND_ENABLED) return; val = mii_bmcr_encode_fixed(speed, duplex); ret = xpcs_write(xpcs, MDIO_MMD_VEND2, MDIO_CTRL1, val); if (ret) pr_err("%s: xpcs_write returned %pe\n", __func__, ERR_PTR(ret)); } static void xpcs_link_up_1000basex(struct dw_xpcs *xpcs, unsigned int neg_mode, int speed, int duplex) { int val, ret; if (neg_mode == PHYLINK_PCS_NEG_INBAND_ENABLED) return; switch (speed) { case SPEED_1000: val = BMCR_SPEED1000; break; case SPEED_100: case SPEED_10: default: pr_err("%s: speed = %d\n", __func__, speed); return; } if (duplex == DUPLEX_FULL) val |= BMCR_FULLDPLX; else pr_err("%s: half duplex not supported\n", __func__); ret = xpcs_write(xpcs, MDIO_MMD_VEND2, MDIO_CTRL1, val); if (ret) pr_err("%s: xpcs_write returned %pe\n", __func__, ERR_PTR(ret)); } void xpcs_link_up(struct phylink_pcs *pcs, unsigned int neg_mode, phy_interface_t interface, int speed, int duplex) { struct dw_xpcs *xpcs = phylink_pcs_to_xpcs(pcs); if (interface == PHY_INTERFACE_MODE_USXGMII) return xpcs_config_usxgmii(xpcs, speed); if (interface == PHY_INTERFACE_MODE_SGMII) return xpcs_link_up_sgmii(xpcs, neg_mode, speed, duplex); if (interface == PHY_INTERFACE_MODE_1000BASEX) return xpcs_link_up_1000basex(xpcs, neg_mode, speed, duplex); } EXPORT_SYMBOL_GPL(xpcs_link_up); static void xpcs_an_restart(struct phylink_pcs *pcs) { struct dw_xpcs *xpcs = phylink_pcs_to_xpcs(pcs); int ret; ret = xpcs_read(xpcs, MDIO_MMD_VEND2, MDIO_CTRL1); if (ret >= 0) { ret |= BMCR_ANRESTART; xpcs_write(xpcs, MDIO_MMD_VEND2, MDIO_CTRL1, ret); } } static u32 xpcs_get_id(struct dw_xpcs *xpcs) { int ret; u32 id; /* First, search C73 PCS using PCS MMD */ ret = xpcs_read(xpcs, MDIO_MMD_PCS, MII_PHYSID1); if (ret < 0) return 0xffffffff; id = ret << 16; ret = xpcs_read(xpcs, MDIO_MMD_PCS, MII_PHYSID2); if (ret < 0) return 0xffffffff; /* If Device IDs are not all zeros or all ones, * we found C73 AN-type device */ if ((id | ret) && (id | ret) != 0xffffffff) return id | ret; /* Next, search C37 PCS using Vendor-Specific MII MMD */ ret = xpcs_read(xpcs, MDIO_MMD_VEND2, MII_PHYSID1); if (ret < 0) return 0xffffffff; id = ret << 16; ret = xpcs_read(xpcs, MDIO_MMD_VEND2, MII_PHYSID2); if (ret < 0) return 0xffffffff; /* If Device IDs are not all zeros, we found C37 AN-type device */ if (id | ret) return id | ret; return 0xffffffff; } static const struct xpcs_compat synopsys_xpcs_compat[DW_XPCS_INTERFACE_MAX] = { [DW_XPCS_USXGMII] = { .supported = xpcs_usxgmii_features, .interface = xpcs_usxgmii_interfaces, .num_interfaces = ARRAY_SIZE(xpcs_usxgmii_interfaces), .an_mode = DW_AN_C73, }, [DW_XPCS_10GKR] = { .supported = xpcs_10gkr_features, .interface = xpcs_10gkr_interfaces, .num_interfaces = ARRAY_SIZE(xpcs_10gkr_interfaces), .an_mode = DW_AN_C73, }, [DW_XPCS_XLGMII] = { .supported = xpcs_xlgmii_features, .interface = xpcs_xlgmii_interfaces, .num_interfaces = ARRAY_SIZE(xpcs_xlgmii_interfaces), .an_mode = DW_AN_C73, }, [DW_XPCS_10GBASER] = { .supported = xpcs_10gbaser_features, .interface = xpcs_10gbaser_interfaces, .num_interfaces = ARRAY_SIZE(xpcs_10gbaser_interfaces), .an_mode = DW_10GBASER, }, [DW_XPCS_SGMII] = { .supported = xpcs_sgmii_features, .interface = xpcs_sgmii_interfaces, .num_interfaces = ARRAY_SIZE(xpcs_sgmii_interfaces), .an_mode = DW_AN_C37_SGMII, }, [DW_XPCS_1000BASEX] = { .supported = xpcs_1000basex_features, .interface = xpcs_1000basex_interfaces, 
.num_interfaces = ARRAY_SIZE(xpcs_1000basex_interfaces), .an_mode = DW_AN_C37_1000BASEX, }, [DW_XPCS_2500BASEX] = { .supported = xpcs_2500basex_features, .interface = xpcs_2500basex_interfaces, .num_interfaces = ARRAY_SIZE(xpcs_2500basex_interfaces), .an_mode = DW_2500BASEX, }, }; static const struct xpcs_compat nxp_sja1105_xpcs_compat[DW_XPCS_INTERFACE_MAX] = { [DW_XPCS_SGMII] = { .supported = xpcs_sgmii_features, .interface = xpcs_sgmii_interfaces, .num_interfaces = ARRAY_SIZE(xpcs_sgmii_interfaces), .an_mode = DW_AN_C37_SGMII, .pma_config = nxp_sja1105_sgmii_pma_config, }, }; static const struct xpcs_compat nxp_sja1110_xpcs_compat[DW_XPCS_INTERFACE_MAX] = { [DW_XPCS_SGMII] = { .supported = xpcs_sgmii_features, .interface = xpcs_sgmii_interfaces, .num_interfaces = ARRAY_SIZE(xpcs_sgmii_interfaces), .an_mode = DW_AN_C37_SGMII, .pma_config = nxp_sja1110_sgmii_pma_config, }, [DW_XPCS_2500BASEX] = { .supported = xpcs_2500basex_features, .interface = xpcs_2500basex_interfaces, .num_interfaces = ARRAY_SIZE(xpcs_2500basex_interfaces), .an_mode = DW_2500BASEX, .pma_config = nxp_sja1110_2500basex_pma_config, }, }; static const struct xpcs_id xpcs_id_list[] = { { .id = SYNOPSYS_XPCS_ID, .mask = SYNOPSYS_XPCS_MASK, .compat = synopsys_xpcs_compat, }, { .id = NXP_SJA1105_XPCS_ID, .mask = SYNOPSYS_XPCS_MASK, .compat = nxp_sja1105_xpcs_compat, }, { .id = NXP_SJA1110_XPCS_ID, .mask = SYNOPSYS_XPCS_MASK, .compat = nxp_sja1110_xpcs_compat, }, }; static const struct phylink_pcs_ops xpcs_phylink_ops = { .pcs_validate = xpcs_validate, .pcs_config = xpcs_config, .pcs_get_state = xpcs_get_state, .pcs_an_restart = xpcs_an_restart, .pcs_link_up = xpcs_link_up, }; static struct dw_xpcs *xpcs_create(struct mdio_device *mdiodev, phy_interface_t interface) { struct dw_xpcs *xpcs; u32 xpcs_id; int i, ret; xpcs = kzalloc(sizeof(*xpcs), GFP_KERNEL); if (!xpcs) return ERR_PTR(-ENOMEM); mdio_device_get(mdiodev); xpcs->mdiodev = mdiodev; xpcs_id = xpcs_get_id(xpcs); for (i = 0; i < ARRAY_SIZE(xpcs_id_list); i++) { const struct xpcs_id *entry = &xpcs_id_list[i]; const struct xpcs_compat *compat; if ((xpcs_id & entry->mask) != entry->id) continue; xpcs->id = entry; compat = xpcs_find_compat(entry, interface); if (!compat) { ret = -ENODEV; goto out; } ret = xpcs_dev_flag(xpcs); if (ret) goto out; xpcs->pcs.ops = &xpcs_phylink_ops; xpcs->pcs.neg_mode = true; if (xpcs->dev_flag != DW_DEV_TXGBE) { xpcs->pcs.poll = true; ret = xpcs_soft_reset(xpcs, compat); if (ret) goto out; } return xpcs; } ret = -ENODEV; out: mdio_device_put(mdiodev); kfree(xpcs); return ERR_PTR(ret); } void xpcs_destroy(struct dw_xpcs *xpcs) { if (xpcs) mdio_device_put(xpcs->mdiodev); kfree(xpcs); } EXPORT_SYMBOL_GPL(xpcs_destroy); struct dw_xpcs *xpcs_create_mdiodev(struct mii_bus *bus, int addr, phy_interface_t interface) { struct mdio_device *mdiodev; struct dw_xpcs *xpcs; mdiodev = mdio_device_create(bus, addr); if (IS_ERR(mdiodev)) return ERR_CAST(mdiodev); xpcs = xpcs_create(mdiodev, interface); /* xpcs_create() has taken a refcount on the mdiodev if it was * successful. If xpcs_create() fails, this will free the mdio * device here. In any case, we don't need to hold our reference * anymore, and putting it here will allow mdio_device_put() in * xpcs_destroy() to automatically free the mdio device. */ mdio_device_put(mdiodev); return xpcs; } EXPORT_SYMBOL_GPL(xpcs_create_mdiodev); MODULE_LICENSE("GPL v2");
linux-master
drivers/net/pcs/pcs-xpcs.c
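xpcs_get_id() in pcs-xpcs.c assembles a 32-bit identifier from the two 16-bit MII_PHYSID registers, and xpcs_create() then walks xpcs_id_list comparing that identifier under a per-entry mask. The sketch below shows the same assembly and masked lookup in isolation; the register values and table entries are made up for illustration.

#include <stdint.h>
#include <stdio.h>

struct id_entry {
	uint32_t id;
	uint32_t mask;
	const char *name;
};

/* PHYSID1 supplies the upper 16 bits of the ID, PHYSID2 the lower 16 bits */
static uint32_t assemble_id(uint16_t physid1, uint16_t physid2)
{
	return ((uint32_t)physid1 << 16) | physid2;
}

int main(void)
{
	/* Invented IDs and masks, only the lookup pattern matters here */
	static const struct id_entry table[] = {
		{ .id = 0x7996ced0, .mask = 0xffffffff, .name = "example PCS A" },
		{ .id = 0x00000010, .mask = 0x000000ff, .name = "example PCS B" },
	};
	uint32_t id = assemble_id(0x0000, 0x0210);

	for (unsigned int i = 0; i < sizeof(table) / sizeof(table[0]); i++) {
		if ((id & table[i].mask) == table[i].id) {
			printf("ID %08x matched %s\n", (unsigned int)id,
			       table[i].name);
			return 0;
		}
	}

	printf("ID %08x not recognised\n", (unsigned int)id);
	return 1;
}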
// SPDX-License-Identifier: GPL-2.0 /* Copyright 2021 NXP */ #include <linux/pcs/pcs-xpcs.h> #include "pcs-xpcs.h" /* LANE_DRIVER1_0 register */ #define SJA1110_LANE_DRIVER1_0 0x8038 #define SJA1110_TXDRV(x) (((x) << 12) & GENMASK(14, 12)) /* LANE_DRIVER2_0 register */ #define SJA1110_LANE_DRIVER2_0 0x803a #define SJA1110_TXDRVTRIM_LSB(x) ((x) & GENMASK_ULL(15, 0)) /* LANE_DRIVER2_1 register */ #define SJA1110_LANE_DRIVER2_1 0x803b #define SJA1110_LANE_DRIVER2_1_RSV BIT(9) #define SJA1110_TXDRVTRIM_MSB(x) (((x) & GENMASK_ULL(23, 16)) >> 16) /* LANE_TRIM register */ #define SJA1110_LANE_TRIM 0x8040 #define SJA1110_TXTEN BIT(11) #define SJA1110_TXRTRIM(x) (((x) << 8) & GENMASK(10, 8)) #define SJA1110_TXPLL_BWSEL BIT(7) #define SJA1110_RXTEN BIT(6) #define SJA1110_RXRTRIM(x) (((x) << 3) & GENMASK(5, 3)) #define SJA1110_CDR_GAIN BIT(2) #define SJA1110_ACCOUPLE_RXVCM_EN BIT(0) /* LANE_DATAPATH_1 register */ #define SJA1110_LANE_DATAPATH_1 0x8037 /* POWERDOWN_ENABLE register */ #define SJA1110_POWERDOWN_ENABLE 0x8041 #define SJA1110_TXPLL_PD BIT(12) #define SJA1110_TXPD BIT(11) #define SJA1110_RXPKDETEN BIT(10) #define SJA1110_RXCH_PD BIT(9) #define SJA1110_RXBIAS_PD BIT(8) #define SJA1110_RESET_SER_EN BIT(7) #define SJA1110_RESET_SER BIT(6) #define SJA1110_RESET_DES BIT(5) #define SJA1110_RCVEN BIT(4) /* RXPLL_CTRL0 register */ #define SJA1110_RXPLL_CTRL0 0x8065 #define SJA1110_RXPLL_FBDIV(x) (((x) << 2) & GENMASK(9, 2)) /* RXPLL_CTRL1 register */ #define SJA1110_RXPLL_CTRL1 0x8066 #define SJA1110_RXPLL_REFDIV(x) ((x) & GENMASK(4, 0)) /* TXPLL_CTRL0 register */ #define SJA1110_TXPLL_CTRL0 0x806d #define SJA1110_TXPLL_FBDIV(x) ((x) & GENMASK(11, 0)) /* TXPLL_CTRL1 register */ #define SJA1110_TXPLL_CTRL1 0x806e #define SJA1110_TXPLL_REFDIV(x) ((x) & GENMASK(5, 0)) /* RX_DATA_DETECT register */ #define SJA1110_RX_DATA_DETECT 0x8045 /* RX_CDR_CTLE register */ #define SJA1110_RX_CDR_CTLE 0x8042 /* In NXP SJA1105, the PCS is integrated with a PMA that has the TX lane * polarity inverted by default (PLUS is MINUS, MINUS is PLUS). To obtain * normal non-inverted behavior, the TX lane polarity must be inverted in the * PCS, via the DIGITAL_CONTROL_2 register. */ int nxp_sja1105_sgmii_pma_config(struct dw_xpcs *xpcs) { return xpcs_write(xpcs, MDIO_MMD_VEND2, DW_VR_MII_DIG_CTRL2, DW_VR_MII_DIG_CTRL2_TX_POL_INV); } static int nxp_sja1110_pma_config(struct dw_xpcs *xpcs, u16 txpll_fbdiv, u16 txpll_refdiv, u16 rxpll_fbdiv, u16 rxpll_refdiv, u16 rx_cdr_ctle) { u16 val; int ret; /* Program TX PLL feedback divider and reference divider settings for * correct oscillation frequency. */ ret = xpcs_write(xpcs, MDIO_MMD_VEND2, SJA1110_TXPLL_CTRL0, SJA1110_TXPLL_FBDIV(txpll_fbdiv)); if (ret < 0) return ret; ret = xpcs_write(xpcs, MDIO_MMD_VEND2, SJA1110_TXPLL_CTRL1, SJA1110_TXPLL_REFDIV(txpll_refdiv)); if (ret < 0) return ret; /* Program transmitter amplitude and disable amplitude trimming */ ret = xpcs_write(xpcs, MDIO_MMD_VEND2, SJA1110_LANE_DRIVER1_0, SJA1110_TXDRV(0x5)); if (ret < 0) return ret; val = SJA1110_TXDRVTRIM_LSB(0xffffffull); ret = xpcs_write(xpcs, MDIO_MMD_VEND2, SJA1110_LANE_DRIVER2_0, val); if (ret < 0) return ret; val = SJA1110_TXDRVTRIM_MSB(0xffffffull) | SJA1110_LANE_DRIVER2_1_RSV; ret = xpcs_write(xpcs, MDIO_MMD_VEND2, SJA1110_LANE_DRIVER2_1, val); if (ret < 0) return ret; /* Enable input and output resistor terminations for low BER. 
*/ val = SJA1110_ACCOUPLE_RXVCM_EN | SJA1110_CDR_GAIN | SJA1110_RXRTRIM(4) | SJA1110_RXTEN | SJA1110_TXPLL_BWSEL | SJA1110_TXRTRIM(3) | SJA1110_TXTEN; ret = xpcs_write(xpcs, MDIO_MMD_VEND2, SJA1110_LANE_TRIM, val); if (ret < 0) return ret; /* Select PCS as transmitter data source. */ ret = xpcs_write(xpcs, MDIO_MMD_VEND2, SJA1110_LANE_DATAPATH_1, 0); if (ret < 0) return ret; /* Program RX PLL feedback divider and reference divider for correct * oscillation frequency. */ ret = xpcs_write(xpcs, MDIO_MMD_VEND2, SJA1110_RXPLL_CTRL0, SJA1110_RXPLL_FBDIV(rxpll_fbdiv)); if (ret < 0) return ret; ret = xpcs_write(xpcs, MDIO_MMD_VEND2, SJA1110_RXPLL_CTRL1, SJA1110_RXPLL_REFDIV(rxpll_refdiv)); if (ret < 0) return ret; /* Program threshold for receiver signal detector. * Enable control of RXPLL by receiver signal detector to disable RXPLL * when an input signal is not present. */ ret = xpcs_write(xpcs, MDIO_MMD_VEND2, SJA1110_RX_DATA_DETECT, 0x0005); if (ret < 0) return ret; /* Enable TX and RX PLLs and circuits. * Release reset of PMA to enable data flow to/from PCS. */ ret = xpcs_read(xpcs, MDIO_MMD_VEND2, SJA1110_POWERDOWN_ENABLE); if (ret < 0) return ret; val = ret & ~(SJA1110_TXPLL_PD | SJA1110_TXPD | SJA1110_RXCH_PD | SJA1110_RXBIAS_PD | SJA1110_RESET_SER_EN | SJA1110_RESET_SER | SJA1110_RESET_DES); val |= SJA1110_RXPKDETEN | SJA1110_RCVEN; ret = xpcs_write(xpcs, MDIO_MMD_VEND2, SJA1110_POWERDOWN_ENABLE, val); if (ret < 0) return ret; /* Program continuous-time linear equalizer (CTLE) settings. */ ret = xpcs_write(xpcs, MDIO_MMD_VEND2, SJA1110_RX_CDR_CTLE, rx_cdr_ctle); if (ret < 0) return ret; return 0; } int nxp_sja1110_sgmii_pma_config(struct dw_xpcs *xpcs) { return nxp_sja1110_pma_config(xpcs, 0x19, 0x1, 0x19, 0x1, 0x212a); } int nxp_sja1110_2500basex_pma_config(struct dw_xpcs *xpcs) { return nxp_sja1110_pma_config(xpcs, 0x7d, 0x2, 0x7d, 0x2, 0x732a); }
linux-master
drivers/net/pcs/pcs-xpcs-nxp.c
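The SJA1110_TXDRVTRIM_LSB()/SJA1110_TXDRVTRIM_MSB() macros in pcs-xpcs-nxp.c split one 24-bit transmitter trim value across the 16-bit LANE_DRIVER2_0 and LANE_DRIVER2_1 registers. A short sketch of that split and the reverse merge, using plain shifts and masks in place of the kernel's GENMASK helpers:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* bits 15:0 of the trim value go into LANE_DRIVER2_0 */
static uint16_t trim_lsb(uint32_t trim)
{
	return trim & 0xffff;
}

/* bits 23:16 of the trim value go into LANE_DRIVER2_1 */
static uint16_t trim_msb(uint32_t trim)
{
	return (trim >> 16) & 0xff;
}

/* reverse merge, to check the split is lossless */
static uint32_t trim_merge(uint16_t lsb, uint16_t msb)
{
	return ((uint32_t)(msb & 0xff) << 16) | lsb;
}

int main(void)
{
	uint32_t trim = 0xffffff;	/* the value the driver programs above */

	printf("LANE_DRIVER2_0 <= 0x%04x\n", trim_lsb(trim));
	printf("LANE_DRIVER2_1 <= 0x%02x (the real write also sets the reserved bit)\n",
	       trim_msb(trim));
	assert(trim_merge(trim_lsb(trim), trim_msb(trim)) == trim);
	return 0;
}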
// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) /* Copyright 2020 NXP * Lynx PCS MDIO helpers */ #include <linux/mdio.h> #include <linux/phylink.h> #include <linux/pcs-lynx.h> #include <linux/property.h> #define SGMII_CLOCK_PERIOD_NS 8 /* PCS is clocked at 125 MHz */ #define LINK_TIMER_VAL(ns) ((u32)((ns) / SGMII_CLOCK_PERIOD_NS)) #define LINK_TIMER_LO 0x12 #define LINK_TIMER_HI 0x13 #define IF_MODE 0x14 #define IF_MODE_SGMII_EN BIT(0) #define IF_MODE_USE_SGMII_AN BIT(1) #define IF_MODE_SPEED(x) (((x) << 2) & GENMASK(3, 2)) #define IF_MODE_SPEED_MSK GENMASK(3, 2) #define IF_MODE_HALF_DUPLEX BIT(4) struct lynx_pcs { struct phylink_pcs pcs; struct mdio_device *mdio; }; enum sgmii_speed { SGMII_SPEED_10 = 0, SGMII_SPEED_100 = 1, SGMII_SPEED_1000 = 2, SGMII_SPEED_2500 = 2, }; #define phylink_pcs_to_lynx(pl_pcs) container_of((pl_pcs), struct lynx_pcs, pcs) #define lynx_to_phylink_pcs(lynx) (&(lynx)->pcs) static void lynx_pcs_get_state_usxgmii(struct mdio_device *pcs, struct phylink_link_state *state) { struct mii_bus *bus = pcs->bus; int addr = pcs->addr; int status, lpa; status = mdiobus_c45_read(bus, addr, MDIO_MMD_VEND2, MII_BMSR); if (status < 0) return; state->link = !!(status & MDIO_STAT1_LSTATUS); state->an_complete = !!(status & MDIO_AN_STAT1_COMPLETE); if (!state->link || !state->an_complete) return; lpa = mdiobus_c45_read(bus, addr, MDIO_MMD_VEND2, MII_LPA); if (lpa < 0) return; phylink_decode_usxgmii_word(state, lpa); } static void lynx_pcs_get_state_2500basex(struct mdio_device *pcs, struct phylink_link_state *state) { int bmsr, lpa; bmsr = mdiodev_read(pcs, MII_BMSR); lpa = mdiodev_read(pcs, MII_LPA); if (bmsr < 0 || lpa < 0) { state->link = false; return; } state->link = !!(bmsr & BMSR_LSTATUS); state->an_complete = !!(bmsr & BMSR_ANEGCOMPLETE); if (!state->link) return; state->speed = SPEED_2500; state->pause |= MLO_PAUSE_TX | MLO_PAUSE_RX; state->duplex = DUPLEX_FULL; } static void lynx_pcs_get_state(struct phylink_pcs *pcs, struct phylink_link_state *state) { struct lynx_pcs *lynx = phylink_pcs_to_lynx(pcs); switch (state->interface) { case PHY_INTERFACE_MODE_1000BASEX: case PHY_INTERFACE_MODE_SGMII: case PHY_INTERFACE_MODE_QSGMII: phylink_mii_c22_pcs_get_state(lynx->mdio, state); break; case PHY_INTERFACE_MODE_2500BASEX: lynx_pcs_get_state_2500basex(lynx->mdio, state); break; case PHY_INTERFACE_MODE_USXGMII: lynx_pcs_get_state_usxgmii(lynx->mdio, state); break; case PHY_INTERFACE_MODE_10GBASER: phylink_mii_c45_pcs_get_state(lynx->mdio, state); break; default: break; } dev_dbg(&lynx->mdio->dev, "mode=%s/%s/%s link=%u an_complete=%u\n", phy_modes(state->interface), phy_speed_to_str(state->speed), phy_duplex_to_str(state->duplex), state->link, state->an_complete); } static int lynx_pcs_config_giga(struct mdio_device *pcs, phy_interface_t interface, const unsigned long *advertising, unsigned int neg_mode) { int link_timer_ns; u32 link_timer; u16 if_mode; int err; link_timer_ns = phylink_get_link_timer_ns(interface); if (link_timer_ns > 0) { link_timer = LINK_TIMER_VAL(link_timer_ns); mdiodev_write(pcs, LINK_TIMER_LO, link_timer & 0xffff); mdiodev_write(pcs, LINK_TIMER_HI, link_timer >> 16); } if (interface == PHY_INTERFACE_MODE_1000BASEX) { if_mode = 0; } else { /* SGMII and QSGMII */ if_mode = IF_MODE_SGMII_EN; if (neg_mode == PHYLINK_PCS_NEG_INBAND_ENABLED) if_mode |= IF_MODE_USE_SGMII_AN; } err = mdiodev_modify(pcs, IF_MODE, IF_MODE_SGMII_EN | IF_MODE_USE_SGMII_AN, if_mode); if (err) return err; return phylink_mii_c22_pcs_config(pcs, interface, advertising, neg_mode); } 
static int lynx_pcs_config_usxgmii(struct mdio_device *pcs, const unsigned long *advertising, unsigned int neg_mode) { struct mii_bus *bus = pcs->bus; int addr = pcs->addr; if (neg_mode != PHYLINK_PCS_NEG_INBAND_ENABLED) { dev_err(&pcs->dev, "USXGMII only supports in-band AN for now\n"); return -EOPNOTSUPP; } /* Configure device ability for the USXGMII Replicator */ return mdiobus_c45_write(bus, addr, MDIO_MMD_VEND2, MII_ADVERTISE, MDIO_USXGMII_10G | MDIO_USXGMII_LINK | MDIO_USXGMII_FULL_DUPLEX | ADVERTISE_SGMII | ADVERTISE_LPACK); } static int lynx_pcs_config(struct phylink_pcs *pcs, unsigned int neg_mode, phy_interface_t ifmode, const unsigned long *advertising, bool permit) { struct lynx_pcs *lynx = phylink_pcs_to_lynx(pcs); switch (ifmode) { case PHY_INTERFACE_MODE_1000BASEX: case PHY_INTERFACE_MODE_SGMII: case PHY_INTERFACE_MODE_QSGMII: return lynx_pcs_config_giga(lynx->mdio, ifmode, advertising, neg_mode); case PHY_INTERFACE_MODE_2500BASEX: if (neg_mode == PHYLINK_PCS_NEG_INBAND_ENABLED) { dev_err(&lynx->mdio->dev, "AN not supported on 3.125GHz SerDes lane\n"); return -EOPNOTSUPP; } break; case PHY_INTERFACE_MODE_USXGMII: return lynx_pcs_config_usxgmii(lynx->mdio, advertising, neg_mode); case PHY_INTERFACE_MODE_10GBASER: /* Nothing to do here for 10GBASER */ break; default: return -EOPNOTSUPP; } return 0; } static void lynx_pcs_an_restart(struct phylink_pcs *pcs) { struct lynx_pcs *lynx = phylink_pcs_to_lynx(pcs); phylink_mii_c22_pcs_an_restart(lynx->mdio); } static void lynx_pcs_link_up_sgmii(struct mdio_device *pcs, unsigned int neg_mode, int speed, int duplex) { u16 if_mode = 0, sgmii_speed; /* The PCS needs to be configured manually only * when not operating on in-band mode */ if (neg_mode == PHYLINK_PCS_NEG_INBAND_ENABLED) return; if (duplex == DUPLEX_HALF) if_mode |= IF_MODE_HALF_DUPLEX; switch (speed) { case SPEED_1000: sgmii_speed = SGMII_SPEED_1000; break; case SPEED_100: sgmii_speed = SGMII_SPEED_100; break; case SPEED_10: sgmii_speed = SGMII_SPEED_10; break; case SPEED_UNKNOWN: /* Silently don't do anything */ return; default: dev_err(&pcs->dev, "Invalid PCS speed %d\n", speed); return; } if_mode |= IF_MODE_SPEED(sgmii_speed); mdiodev_modify(pcs, IF_MODE, IF_MODE_HALF_DUPLEX | IF_MODE_SPEED_MSK, if_mode); } /* 2500Base-X is SerDes protocol 7 on Felix and 6 on ENETC. It is a SerDes lane * clocked at 3.125 GHz which encodes symbols with 8b/10b and does not have * auto-negotiation of any link parameters. Electrically it is compatible with * a single lane of XAUI. * The hardware reference manual wants to call this mode SGMII, but it isn't * really, since the fundamental features of SGMII: * - Downgrading the link speed by duplicating symbols * - Auto-negotiation * are not there. * The speed is configured at 1000 in the IF_MODE because the clock frequency * is actually given by a PLL configured in the Reset Configuration Word (RCW). * Since there is no difference between fixed speed SGMII w/o AN and 802.3z w/o * AN, we call this PHY interface type 2500Base-X. In case a PHY negotiates a * lower link speed on line side, the system-side interface remains fixed at * 2500 Mbps and we do rate adaptation through pause frames. 
*/ static void lynx_pcs_link_up_2500basex(struct mdio_device *pcs, unsigned int neg_mode, int speed, int duplex) { u16 if_mode = 0; if (neg_mode == PHYLINK_PCS_NEG_INBAND_ENABLED) { dev_err(&pcs->dev, "AN not supported for 2500BaseX\n"); return; } if (duplex == DUPLEX_HALF) if_mode |= IF_MODE_HALF_DUPLEX; if_mode |= IF_MODE_SPEED(SGMII_SPEED_2500); mdiodev_modify(pcs, IF_MODE, IF_MODE_HALF_DUPLEX | IF_MODE_SPEED_MSK, if_mode); } static void lynx_pcs_link_up(struct phylink_pcs *pcs, unsigned int neg_mode, phy_interface_t interface, int speed, int duplex) { struct lynx_pcs *lynx = phylink_pcs_to_lynx(pcs); switch (interface) { case PHY_INTERFACE_MODE_SGMII: case PHY_INTERFACE_MODE_QSGMII: lynx_pcs_link_up_sgmii(lynx->mdio, neg_mode, speed, duplex); break; case PHY_INTERFACE_MODE_2500BASEX: lynx_pcs_link_up_2500basex(lynx->mdio, neg_mode, speed, duplex); break; case PHY_INTERFACE_MODE_USXGMII: /* At the moment, only in-band AN is supported for USXGMII * so nothing to do in link_up */ break; default: break; } } static const struct phylink_pcs_ops lynx_pcs_phylink_ops = { .pcs_get_state = lynx_pcs_get_state, .pcs_config = lynx_pcs_config, .pcs_an_restart = lynx_pcs_an_restart, .pcs_link_up = lynx_pcs_link_up, }; static struct phylink_pcs *lynx_pcs_create(struct mdio_device *mdio) { struct lynx_pcs *lynx; lynx = kzalloc(sizeof(*lynx), GFP_KERNEL); if (!lynx) return ERR_PTR(-ENOMEM); mdio_device_get(mdio); lynx->mdio = mdio; lynx->pcs.ops = &lynx_pcs_phylink_ops; lynx->pcs.neg_mode = true; lynx->pcs.poll = true; return lynx_to_phylink_pcs(lynx); } struct phylink_pcs *lynx_pcs_create_mdiodev(struct mii_bus *bus, int addr) { struct mdio_device *mdio; struct phylink_pcs *pcs; mdio = mdio_device_create(bus, addr); if (IS_ERR(mdio)) return ERR_CAST(mdio); pcs = lynx_pcs_create(mdio); /* lynx_create() has taken a refcount on the mdiodev if it was * successful. If lynx_create() fails, this will free the mdio * device here. In any case, we don't need to hold our reference * anymore, and putting it here will allow mdio_device_put() in * lynx_destroy() to automatically free the mdio device. */ mdio_device_put(mdio); return pcs; } EXPORT_SYMBOL(lynx_pcs_create_mdiodev); /* * lynx_pcs_create_fwnode() creates a lynx PCS instance from the fwnode * device indicated by node. * * Returns: * -ENODEV if the fwnode is marked unavailable * -EPROBE_DEFER if we fail to find the device * -ENOMEM if we fail to allocate memory * pointer to a phylink_pcs on success */ struct phylink_pcs *lynx_pcs_create_fwnode(struct fwnode_handle *node) { struct mdio_device *mdio; struct phylink_pcs *pcs; if (!fwnode_device_is_available(node)) return ERR_PTR(-ENODEV); mdio = fwnode_mdio_find_device(node); if (!mdio) return ERR_PTR(-EPROBE_DEFER); pcs = lynx_pcs_create(mdio); /* lynx_create() has taken a refcount on the mdiodev if it was * successful. If lynx_create() fails, this will free the mdio * device here. In any case, we don't need to hold our reference * anymore, and putting it here will allow mdio_device_put() in * lynx_destroy() to automatically free the mdio device. */ mdio_device_put(mdio); return pcs; } EXPORT_SYMBOL_GPL(lynx_pcs_create_fwnode); void lynx_pcs_destroy(struct phylink_pcs *pcs) { struct lynx_pcs *lynx = phylink_pcs_to_lynx(pcs); mdio_device_put(lynx->mdio); kfree(lynx); } EXPORT_SYMBOL(lynx_pcs_destroy); MODULE_LICENSE("Dual BSD/GPL");
linux-master
drivers/net/pcs/pcs-lynx.c
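lynx_pcs_config_giga() in pcs-lynx.c converts the phylink link timer from nanoseconds into PCS clock ticks (the PCS is clocked at 125 MHz, so one tick is 8 ns) and writes the result as two 16-bit halves to LINK_TIMER_LO and LINK_TIMER_HI. The sketch below reproduces just that arithmetic; the 1,600,000 ns input is only an example of a plausible SGMII link timer, not a value taken from the driver.

#include <stdint.h>
#include <stdio.h>

#define PCS_CLOCK_PERIOD_NS 8	/* 125 MHz PCS clock */

int main(void)
{
	uint32_t timer_ns = 1600000;	/* example link timer, about 1.6 ms */
	uint32_t ticks = timer_ns / PCS_CLOCK_PERIOD_NS;
	uint16_t lo = ticks & 0xffff;	/* written to LINK_TIMER_LO */
	uint16_t hi = ticks >> 16;	/* written to LINK_TIMER_HI */

	printf("%u ns -> %u ticks (LO=0x%04x, HI=0x%04x)\n",
	       (unsigned int)timer_ns, (unsigned int)ticks, lo, hi);
	return 0;
}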
// SPDX-License-Identifier: GPL-2.0 /* Copyright (c) 2015 - 2023 Beijing WangXun Technology Co., Ltd. */ #include <linux/pcs/pcs-xpcs.h> #include <linux/mdio.h> #include "pcs-xpcs.h" /* VR_XS_PMA_MMD */ #define TXGBE_PMA_MMD 0x8020 #define TXGBE_TX_GENCTL1 0x11 #define TXGBE_TX_GENCTL1_VBOOST_LVL GENMASK(10, 8) #define TXGBE_TX_GENCTL1_VBOOST_EN0 BIT(4) #define TXGBE_TX_GEN_CTL2 0x12 #define TXGBE_TX_GEN_CTL2_TX0_WIDTH(v) FIELD_PREP(GENMASK(9, 8), v) #define TXGBE_TX_RATE_CTL 0x14 #define TXGBE_TX_RATE_CTL_TX0_RATE(v) FIELD_PREP(GENMASK(2, 0), v) #define TXGBE_RX_GEN_CTL2 0x32 #define TXGBE_RX_GEN_CTL2_RX0_WIDTH(v) FIELD_PREP(GENMASK(9, 8), v) #define TXGBE_RX_GEN_CTL3 0x33 #define TXGBE_RX_GEN_CTL3_LOS_TRSHLD0 GENMASK(2, 0) #define TXGBE_RX_RATE_CTL 0x34 #define TXGBE_RX_RATE_CTL_RX0_RATE(v) FIELD_PREP(GENMASK(1, 0), v) #define TXGBE_RX_EQ_ATTN_CTL 0x37 #define TXGBE_RX_EQ_ATTN_LVL0 GENMASK(2, 0) #define TXGBE_RX_EQ_CTL0 0x38 #define TXGBE_RX_EQ_CTL0_VGA1_GAIN(v) FIELD_PREP(GENMASK(15, 12), v) #define TXGBE_RX_EQ_CTL0_VGA2_GAIN(v) FIELD_PREP(GENMASK(11, 8), v) #define TXGBE_RX_EQ_CTL0_CTLE_POLE(v) FIELD_PREP(GENMASK(7, 5), v) #define TXGBE_RX_EQ_CTL0_CTLE_BOOST(v) FIELD_PREP(GENMASK(4, 0), v) #define TXGBE_RX_EQ_CTL4 0x3C #define TXGBE_RX_EQ_CTL4_CONT_OFF_CAN0 BIT(4) #define TXGBE_RX_EQ_CTL4_CONT_ADAPT0 BIT(0) #define TXGBE_AFE_DFE_ENABLE 0x3D #define TXGBE_DFE_EN_0 BIT(4) #define TXGBE_AFE_EN_0 BIT(0) #define TXGBE_DFE_TAP_CTL0 0x3E #define TXGBE_MPLLA_CTL0 0x51 #define TXGBE_MPLLA_CTL2 0x53 #define TXGBE_MPLLA_CTL2_DIV16P5_CLK_EN BIT(10) #define TXGBE_MPLLA_CTL2_DIV10_CLK_EN BIT(9) #define TXGBE_MPLLA_CTL3 0x57 #define TXGBE_MISC_CTL0 0x70 #define TXGBE_MISC_CTL0_PLL BIT(15) #define TXGBE_MISC_CTL0_CR_PARA_SEL BIT(14) #define TXGBE_MISC_CTL0_RX_VREF(v) FIELD_PREP(GENMASK(12, 8), v) #define TXGBE_VCO_CAL_LD0 0x72 #define TXGBE_VCO_CAL_REF0 0x76 static int txgbe_read_pma(struct dw_xpcs *xpcs, int reg) { return xpcs_read(xpcs, MDIO_MMD_PMAPMD, TXGBE_PMA_MMD + reg); } static int txgbe_write_pma(struct dw_xpcs *xpcs, int reg, u16 val) { return xpcs_write(xpcs, MDIO_MMD_PMAPMD, TXGBE_PMA_MMD + reg, val); } static void txgbe_pma_config_10gbaser(struct dw_xpcs *xpcs) { int val; txgbe_write_pma(xpcs, TXGBE_MPLLA_CTL0, 0x21); txgbe_write_pma(xpcs, TXGBE_MPLLA_CTL3, 0); val = txgbe_read_pma(xpcs, TXGBE_TX_GENCTL1); val = u16_replace_bits(val, 0x5, TXGBE_TX_GENCTL1_VBOOST_LVL); txgbe_write_pma(xpcs, TXGBE_TX_GENCTL1, val); txgbe_write_pma(xpcs, TXGBE_MISC_CTL0, TXGBE_MISC_CTL0_PLL | TXGBE_MISC_CTL0_CR_PARA_SEL | TXGBE_MISC_CTL0_RX_VREF(0xF)); txgbe_write_pma(xpcs, TXGBE_VCO_CAL_LD0, 0x549); txgbe_write_pma(xpcs, TXGBE_VCO_CAL_REF0, 0x29); txgbe_write_pma(xpcs, TXGBE_TX_RATE_CTL, 0); txgbe_write_pma(xpcs, TXGBE_RX_RATE_CTL, 0); txgbe_write_pma(xpcs, TXGBE_TX_GEN_CTL2, TXGBE_TX_GEN_CTL2_TX0_WIDTH(3)); txgbe_write_pma(xpcs, TXGBE_RX_GEN_CTL2, TXGBE_RX_GEN_CTL2_RX0_WIDTH(3)); txgbe_write_pma(xpcs, TXGBE_MPLLA_CTL2, TXGBE_MPLLA_CTL2_DIV16P5_CLK_EN | TXGBE_MPLLA_CTL2_DIV10_CLK_EN); txgbe_write_pma(xpcs, TXGBE_RX_EQ_CTL0, TXGBE_RX_EQ_CTL0_CTLE_POLE(2) | TXGBE_RX_EQ_CTL0_CTLE_BOOST(5)); val = txgbe_read_pma(xpcs, TXGBE_RX_EQ_ATTN_CTL); val &= ~TXGBE_RX_EQ_ATTN_LVL0; txgbe_write_pma(xpcs, TXGBE_RX_EQ_ATTN_CTL, val); txgbe_write_pma(xpcs, TXGBE_DFE_TAP_CTL0, 0xBE); val = txgbe_read_pma(xpcs, TXGBE_AFE_DFE_ENABLE); val &= ~(TXGBE_DFE_EN_0 | TXGBE_AFE_EN_0); txgbe_write_pma(xpcs, TXGBE_AFE_DFE_ENABLE, val); val = txgbe_read_pma(xpcs, TXGBE_RX_EQ_CTL4); val &= ~TXGBE_RX_EQ_CTL4_CONT_ADAPT0; 
txgbe_write_pma(xpcs, TXGBE_RX_EQ_CTL4, val); } static void txgbe_pma_config_1g(struct dw_xpcs *xpcs) { int val; val = txgbe_read_pma(xpcs, TXGBE_TX_GENCTL1); val = u16_replace_bits(val, 0x5, TXGBE_TX_GENCTL1_VBOOST_LVL); val &= ~TXGBE_TX_GENCTL1_VBOOST_EN0; txgbe_write_pma(xpcs, TXGBE_TX_GENCTL1, val); txgbe_write_pma(xpcs, TXGBE_MISC_CTL0, TXGBE_MISC_CTL0_PLL | TXGBE_MISC_CTL0_CR_PARA_SEL | TXGBE_MISC_CTL0_RX_VREF(0xF)); txgbe_write_pma(xpcs, TXGBE_RX_EQ_CTL0, TXGBE_RX_EQ_CTL0_VGA1_GAIN(7) | TXGBE_RX_EQ_CTL0_VGA2_GAIN(7) | TXGBE_RX_EQ_CTL0_CTLE_BOOST(6)); val = txgbe_read_pma(xpcs, TXGBE_RX_EQ_ATTN_CTL); val &= ~TXGBE_RX_EQ_ATTN_LVL0; txgbe_write_pma(xpcs, TXGBE_RX_EQ_ATTN_CTL, val); txgbe_write_pma(xpcs, TXGBE_DFE_TAP_CTL0, 0); val = txgbe_read_pma(xpcs, TXGBE_RX_GEN_CTL3); val = u16_replace_bits(val, 0x4, TXGBE_RX_GEN_CTL3_LOS_TRSHLD0); txgbe_write_pma(xpcs, TXGBE_RX_EQ_ATTN_CTL, val); txgbe_write_pma(xpcs, TXGBE_MPLLA_CTL0, 0x20); txgbe_write_pma(xpcs, TXGBE_MPLLA_CTL3, 0x46); txgbe_write_pma(xpcs, TXGBE_VCO_CAL_LD0, 0x540); txgbe_write_pma(xpcs, TXGBE_VCO_CAL_REF0, 0x2A); txgbe_write_pma(xpcs, TXGBE_AFE_DFE_ENABLE, 0); txgbe_write_pma(xpcs, TXGBE_RX_EQ_CTL4, TXGBE_RX_EQ_CTL4_CONT_OFF_CAN0); txgbe_write_pma(xpcs, TXGBE_TX_RATE_CTL, TXGBE_TX_RATE_CTL_TX0_RATE(3)); txgbe_write_pma(xpcs, TXGBE_RX_RATE_CTL, TXGBE_RX_RATE_CTL_RX0_RATE(3)); txgbe_write_pma(xpcs, TXGBE_TX_GEN_CTL2, TXGBE_TX_GEN_CTL2_TX0_WIDTH(1)); txgbe_write_pma(xpcs, TXGBE_RX_GEN_CTL2, TXGBE_RX_GEN_CTL2_RX0_WIDTH(1)); txgbe_write_pma(xpcs, TXGBE_MPLLA_CTL2, TXGBE_MPLLA_CTL2_DIV10_CLK_EN); } static int txgbe_pcs_poll_power_up(struct dw_xpcs *xpcs) { int val, ret; /* Wait xpcs power-up good */ ret = read_poll_timeout(xpcs_read_vpcs, val, (val & DW_PSEQ_ST) == DW_PSEQ_ST_GOOD, 10000, 1000000, false, xpcs, DW_VR_XS_PCS_DIG_STS); if (ret < 0) dev_err(&xpcs->mdiodev->dev, "xpcs power-up timeout\n"); return ret; } static int txgbe_pma_init_done(struct dw_xpcs *xpcs) { int val, ret; xpcs_write_vpcs(xpcs, DW_VR_XS_PCS_DIG_CTRL1, DW_VR_RST | DW_EN_VSMMD1); /* wait pma initialization done */ ret = read_poll_timeout(xpcs_read_vpcs, val, !(val & DW_VR_RST), 100000, 10000000, false, xpcs, DW_VR_XS_PCS_DIG_CTRL1); if (ret < 0) dev_err(&xpcs->mdiodev->dev, "xpcs pma initialization timeout\n"); return ret; } static bool txgbe_xpcs_mode_quirk(struct dw_xpcs *xpcs) { int ret; /* When txgbe do LAN reset, PCS will change to default 10GBASE-R mode */ ret = xpcs_read(xpcs, MDIO_MMD_PCS, MDIO_CTRL2); ret &= MDIO_PCS_CTRL2_TYPE; if ((ret == MDIO_PCS_CTRL2_10GBR && xpcs->interface != PHY_INTERFACE_MODE_10GBASER) || xpcs->interface == PHY_INTERFACE_MODE_SGMII) return true; return false; } int txgbe_xpcs_switch_mode(struct dw_xpcs *xpcs, phy_interface_t interface) { int val, ret; switch (interface) { case PHY_INTERFACE_MODE_10GBASER: case PHY_INTERFACE_MODE_SGMII: case PHY_INTERFACE_MODE_1000BASEX: break; default: return 0; } if (xpcs->interface == interface && !txgbe_xpcs_mode_quirk(xpcs)) return 0; xpcs->interface = interface; ret = txgbe_pcs_poll_power_up(xpcs); if (ret < 0) return ret; if (interface == PHY_INTERFACE_MODE_10GBASER) { xpcs_write(xpcs, MDIO_MMD_PCS, MDIO_CTRL2, MDIO_PCS_CTRL2_10GBR); val = xpcs_read(xpcs, MDIO_MMD_PMAPMD, MDIO_CTRL1); val |= MDIO_CTRL1_SPEED10G; xpcs_write(xpcs, MDIO_MMD_PMAPMD, MDIO_CTRL1, val); txgbe_pma_config_10gbaser(xpcs); } else { xpcs_write(xpcs, MDIO_MMD_PCS, MDIO_CTRL2, MDIO_PCS_CTRL2_10GBX); xpcs_write(xpcs, MDIO_MMD_PMAPMD, MDIO_CTRL1, 0); xpcs_write(xpcs, MDIO_MMD_PCS, MDIO_CTRL1, 0); 
		txgbe_pma_config_1g(xpcs);
	}

	return txgbe_pma_init_done(xpcs);
}
linux-master
drivers/net/pcs/pcs-xpcs-wx.c
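The TXGBE PMA setup in pcs-xpcs-wx.c repeatedly rewrites a single field of a register while preserving the other bits, using u16_replace_bits() and FIELD_PREP(). A standalone sketch of that read-modify-write pattern for a three-bit field in bits 10:8, similar to TXGBE_TX_GENCTL1_VBOOST_LVL; the register readback value is invented for the example.

#include <stdint.h>
#include <stdio.h>

#define FIELD_MASK  0x0700	/* a three-bit field in bits 10:8 */
#define FIELD_SHIFT 8

/* keep every bit outside the field, substitute the new field value */
static uint16_t replace_field(uint16_t reg, uint16_t val)
{
	return (reg & ~FIELD_MASK) | ((val << FIELD_SHIFT) & FIELD_MASK);
}

int main(void)
{
	uint16_t reg = 0x1234;	/* invented register readback */

	reg = replace_field(reg, 0x5);
	printf("new register value: 0x%04x\n", reg);	/* prints 0x1534 */
	return 0;
}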
// SPDX-License-Identifier: GPL-2.0 // Copyright (c) 2018-2019 MediaTek Inc. /* A library for MediaTek SGMII circuit * * Author: Sean Wang <[email protected]> * Author: Alexander Couzens <[email protected]> * Author: Daniel Golle <[email protected]> * */ #include <linux/mdio.h> #include <linux/of.h> #include <linux/pcs/pcs-mtk-lynxi.h> #include <linux/phylink.h> #include <linux/regmap.h> /* SGMII subsystem config registers */ /* BMCR (low 16) BMSR (high 16) */ #define SGMSYS_PCS_CONTROL_1 0x0 #define SGMII_BMCR GENMASK(15, 0) #define SGMII_BMSR GENMASK(31, 16) #define SGMSYS_PCS_DEVICE_ID 0x4 #define SGMII_LYNXI_DEV_ID 0x4d544950 #define SGMSYS_PCS_ADVERTISE 0x8 #define SGMII_ADVERTISE GENMASK(15, 0) #define SGMII_LPA GENMASK(31, 16) #define SGMSYS_PCS_SCRATCH 0x14 #define SGMII_DEV_VERSION GENMASK(31, 16) /* Register to programmable link timer, the unit in 2 * 8ns */ #define SGMSYS_PCS_LINK_TIMER 0x18 #define SGMII_LINK_TIMER_MASK GENMASK(19, 0) #define SGMII_LINK_TIMER_VAL(ns) FIELD_PREP(SGMII_LINK_TIMER_MASK, \ ((ns) / 2 / 8)) /* Register to control remote fault */ #define SGMSYS_SGMII_MODE 0x20 #define SGMII_IF_MODE_SGMII BIT(0) #define SGMII_SPEED_DUPLEX_AN BIT(1) #define SGMII_SPEED_MASK GENMASK(3, 2) #define SGMII_SPEED_10 FIELD_PREP(SGMII_SPEED_MASK, 0) #define SGMII_SPEED_100 FIELD_PREP(SGMII_SPEED_MASK, 1) #define SGMII_SPEED_1000 FIELD_PREP(SGMII_SPEED_MASK, 2) #define SGMII_DUPLEX_HALF BIT(4) #define SGMII_REMOTE_FAULT_DIS BIT(8) /* Register to reset SGMII design */ #define SGMSYS_RESERVED_0 0x34 #define SGMII_SW_RESET BIT(0) /* Register to set SGMII speed, ANA RG_ Control Signals III */ #define SGMII_PHY_SPEED_MASK GENMASK(3, 2) #define SGMII_PHY_SPEED_1_25G FIELD_PREP(SGMII_PHY_SPEED_MASK, 0) #define SGMII_PHY_SPEED_3_125G FIELD_PREP(SGMII_PHY_SPEED_MASK, 1) /* Register to power up QPHY */ #define SGMSYS_QPHY_PWR_STATE_CTRL 0xe8 #define SGMII_PHYA_PWD BIT(4) /* Register to QPHY wrapper control */ #define SGMSYS_QPHY_WRAP_CTRL 0xec #define SGMII_PN_SWAP_MASK GENMASK(1, 0) #define SGMII_PN_SWAP_TX_RX (BIT(0) | BIT(1)) /* struct mtk_pcs_lynxi - This structure holds each sgmii regmap andassociated * data * @regmap: The register map pointing at the range used to setup * SGMII modes * @dev: Pointer to device owning the PCS * @ana_rgc3: The offset of register ANA_RGC3 relative to regmap * @interface: Currently configured interface mode * @pcs: Phylink PCS structure * @flags: Flags indicating hardware properties */ struct mtk_pcs_lynxi { struct regmap *regmap; u32 ana_rgc3; phy_interface_t interface; struct phylink_pcs pcs; u32 flags; }; static struct mtk_pcs_lynxi *pcs_to_mtk_pcs_lynxi(struct phylink_pcs *pcs) { return container_of(pcs, struct mtk_pcs_lynxi, pcs); } static void mtk_pcs_lynxi_get_state(struct phylink_pcs *pcs, struct phylink_link_state *state) { struct mtk_pcs_lynxi *mpcs = pcs_to_mtk_pcs_lynxi(pcs); unsigned int bm, adv; /* Read the BMSR and LPA */ regmap_read(mpcs->regmap, SGMSYS_PCS_CONTROL_1, &bm); regmap_read(mpcs->regmap, SGMSYS_PCS_ADVERTISE, &adv); phylink_mii_c22_pcs_decode_state(state, FIELD_GET(SGMII_BMSR, bm), FIELD_GET(SGMII_LPA, adv)); } static int mtk_pcs_lynxi_config(struct phylink_pcs *pcs, unsigned int neg_mode, phy_interface_t interface, const unsigned long *advertising, bool permit_pause_to_mac) { struct mtk_pcs_lynxi *mpcs = pcs_to_mtk_pcs_lynxi(pcs); bool mode_changed = false, changed; unsigned int rgc3, sgm_mode, bmcr; int advertise, link_timer; advertise = phylink_mii_c22_pcs_encode_advertisement(interface, advertising); if (advertise < 0) 
return advertise; /* Clearing IF_MODE_BIT0 switches the PCS to BASE-X mode, and * we assume that fixes it's speed at bitrate = line rate (in * other words, 1000Mbps or 2500Mbps). */ if (interface == PHY_INTERFACE_MODE_SGMII) sgm_mode = SGMII_IF_MODE_SGMII; else sgm_mode = 0; if (neg_mode & PHYLINK_PCS_NEG_INBAND) sgm_mode |= SGMII_REMOTE_FAULT_DIS; if (neg_mode == PHYLINK_PCS_NEG_INBAND_ENABLED) { if (interface == PHY_INTERFACE_MODE_SGMII) sgm_mode |= SGMII_SPEED_DUPLEX_AN; bmcr = BMCR_ANENABLE; } else { bmcr = 0; } if (mpcs->interface != interface) { link_timer = phylink_get_link_timer_ns(interface); if (link_timer < 0) return link_timer; /* PHYA power down */ regmap_set_bits(mpcs->regmap, SGMSYS_QPHY_PWR_STATE_CTRL, SGMII_PHYA_PWD); /* Reset SGMII PCS state */ regmap_set_bits(mpcs->regmap, SGMSYS_RESERVED_0, SGMII_SW_RESET); if (mpcs->flags & MTK_SGMII_FLAG_PN_SWAP) regmap_update_bits(mpcs->regmap, SGMSYS_QPHY_WRAP_CTRL, SGMII_PN_SWAP_MASK, SGMII_PN_SWAP_TX_RX); if (interface == PHY_INTERFACE_MODE_2500BASEX) rgc3 = SGMII_PHY_SPEED_3_125G; else rgc3 = SGMII_PHY_SPEED_1_25G; /* Configure the underlying interface speed */ regmap_update_bits(mpcs->regmap, mpcs->ana_rgc3, SGMII_PHY_SPEED_MASK, rgc3); /* Setup the link timer */ regmap_write(mpcs->regmap, SGMSYS_PCS_LINK_TIMER, SGMII_LINK_TIMER_VAL(link_timer)); mpcs->interface = interface; mode_changed = true; } /* Update the advertisement, noting whether it has changed */ regmap_update_bits_check(mpcs->regmap, SGMSYS_PCS_ADVERTISE, SGMII_ADVERTISE, advertise, &changed); /* Update the sgmsys mode register */ regmap_update_bits(mpcs->regmap, SGMSYS_SGMII_MODE, SGMII_REMOTE_FAULT_DIS | SGMII_SPEED_DUPLEX_AN | SGMII_IF_MODE_SGMII, sgm_mode); /* Update the BMCR */ regmap_update_bits(mpcs->regmap, SGMSYS_PCS_CONTROL_1, BMCR_ANENABLE, bmcr); /* Release PHYA power down state * Only removing bit SGMII_PHYA_PWD isn't enough. * There are cases when the SGMII_PHYA_PWD register contains 0x9 which * prevents SGMII from working. The SGMII still shows link but no traffic * can flow. Writing 0x0 to the PHYA_PWD register fix the issue. 0x0 was * taken from a good working state of the SGMII interface. * Unknown how much the QPHY needs but it is racy without a sleep. * Tested on mt7622 & mt7986. 
*/ usleep_range(50, 100); regmap_write(mpcs->regmap, SGMSYS_QPHY_PWR_STATE_CTRL, 0); return changed || mode_changed; } static void mtk_pcs_lynxi_restart_an(struct phylink_pcs *pcs) { struct mtk_pcs_lynxi *mpcs = pcs_to_mtk_pcs_lynxi(pcs); regmap_set_bits(mpcs->regmap, SGMSYS_PCS_CONTROL_1, BMCR_ANRESTART); } static void mtk_pcs_lynxi_link_up(struct phylink_pcs *pcs, unsigned int neg_mode, phy_interface_t interface, int speed, int duplex) { struct mtk_pcs_lynxi *mpcs = pcs_to_mtk_pcs_lynxi(pcs); unsigned int sgm_mode; if (neg_mode != PHYLINK_PCS_NEG_INBAND_ENABLED) { /* Force the speed and duplex setting */ if (speed == SPEED_10) sgm_mode = SGMII_SPEED_10; else if (speed == SPEED_100) sgm_mode = SGMII_SPEED_100; else sgm_mode = SGMII_SPEED_1000; if (duplex != DUPLEX_FULL) sgm_mode |= SGMII_DUPLEX_HALF; regmap_update_bits(mpcs->regmap, SGMSYS_SGMII_MODE, SGMII_DUPLEX_HALF | SGMII_SPEED_MASK, sgm_mode); } } static void mtk_pcs_lynxi_disable(struct phylink_pcs *pcs) { struct mtk_pcs_lynxi *mpcs = pcs_to_mtk_pcs_lynxi(pcs); mpcs->interface = PHY_INTERFACE_MODE_NA; } static const struct phylink_pcs_ops mtk_pcs_lynxi_ops = { .pcs_get_state = mtk_pcs_lynxi_get_state, .pcs_config = mtk_pcs_lynxi_config, .pcs_an_restart = mtk_pcs_lynxi_restart_an, .pcs_link_up = mtk_pcs_lynxi_link_up, .pcs_disable = mtk_pcs_lynxi_disable, }; struct phylink_pcs *mtk_pcs_lynxi_create(struct device *dev, struct regmap *regmap, u32 ana_rgc3, u32 flags) { struct mtk_pcs_lynxi *mpcs; u32 id, ver; int ret; ret = regmap_read(regmap, SGMSYS_PCS_DEVICE_ID, &id); if (ret < 0) return NULL; if (id != SGMII_LYNXI_DEV_ID) { dev_err(dev, "unknown PCS device id %08x\n", id); return NULL; } ret = regmap_read(regmap, SGMSYS_PCS_SCRATCH, &ver); if (ret < 0) return NULL; ver = FIELD_GET(SGMII_DEV_VERSION, ver); if (ver != 0x1) { dev_err(dev, "unknown PCS device version %04x\n", ver); return NULL; } dev_dbg(dev, "MediaTek LynxI SGMII PCS (id 0x%08x, ver 0x%04x)\n", id, ver); mpcs = kzalloc(sizeof(*mpcs), GFP_KERNEL); if (!mpcs) return NULL; mpcs->ana_rgc3 = ana_rgc3; mpcs->regmap = regmap; mpcs->flags = flags; mpcs->pcs.ops = &mtk_pcs_lynxi_ops; mpcs->pcs.neg_mode = true; mpcs->pcs.poll = true; mpcs->interface = PHY_INTERFACE_MODE_NA; return &mpcs->pcs; } EXPORT_SYMBOL(mtk_pcs_lynxi_create); void mtk_pcs_lynxi_destroy(struct phylink_pcs *pcs) { if (!pcs) return; kfree(pcs_to_mtk_pcs_lynxi(pcs)); } EXPORT_SYMBOL(mtk_pcs_lynxi_destroy); MODULE_LICENSE("GPL");
linux-master
drivers/net/pcs/pcs-mtk-lynxi.c
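The LynxI PCS above is a library consumed by MediaTek Ethernet MAC drivers rather than a standalone device. A minimal sketch of that consumer side, assuming a hypothetical MAC driver that obtains the SGMII block as a syscon regmap and knows its ANA_RGC3 offset; the demo_* names are invented for illustration, and only mtk_pcs_lynxi_create()/mtk_pcs_lynxi_destroy() come from the file above.

#include <linux/err.h>
#include <linux/mfd/syscon.h>
#include <linux/of.h>
#include <linux/pcs/pcs-mtk-lynxi.h>
#include <linux/phylink.h>
#include <linux/regmap.h>

/* Hypothetical MAC private data; only the PCS pointer matters here. */
struct demo_mac {
	struct device *dev;
	struct phylink_pcs *sgmii_pcs;
};

static int demo_mac_attach_lynxi_pcs(struct demo_mac *mac,
				     struct device_node *sgmii_np,
				     u32 ana_rgc3, u32 flags)
{
	struct regmap *regmap;

	/* The SGMII register block is modelled as a syscon in this sketch. */
	regmap = syscon_node_to_regmap(sgmii_np);
	if (IS_ERR(regmap))
		return PTR_ERR(regmap);

	/* mtk_pcs_lynxi_create() validates the PCS ID/version registers and
	 * returns NULL on any failure, so a NULL check is sufficient.
	 */
	mac->sgmii_pcs = mtk_pcs_lynxi_create(mac->dev, regmap, ana_rgc3, flags);
	if (!mac->sgmii_pcs)
		return -ENODEV;

	return 0;
}

static void demo_mac_detach_lynxi_pcs(struct demo_mac *mac)
{
	/* NULL-safe, mirroring the destroy helper above. */
	mtk_pcs_lynxi_destroy(mac->sgmii_pcs);
	mac->sgmii_pcs = NULL;
}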
// SPDX-License-Identifier: GPL-2.0+ /* * Broadcom BCM6368 mdiomux bus controller driver * * Copyright (C) 2021 Álvaro Fernández Rojas <[email protected]> */ #include <linux/delay.h> #include <linux/io.h> #include <linux/kernel.h> #include <linux/mdio-mux.h> #include <linux/module.h> #include <linux/of.h> #include <linux/of_platform.h> #include <linux/of_mdio.h> #include <linux/phy.h> #include <linux/platform_device.h> #include <linux/sched.h> #define MDIOC_REG 0x0 #define MDIOC_EXT_MASK BIT(16) #define MDIOC_REG_SHIFT 20 #define MDIOC_PHYID_SHIFT 25 #define MDIOC_RD_MASK BIT(30) #define MDIOC_WR_MASK BIT(31) #define MDIOD_REG 0x4 struct bcm6368_mdiomux_desc { void *mux_handle; void __iomem *base; struct device *dev; struct mii_bus *mii_bus; int ext_phy; }; static int bcm6368_mdiomux_read(struct mii_bus *bus, int phy_id, int loc) { struct bcm6368_mdiomux_desc *md = bus->priv; uint32_t reg; int ret; __raw_writel(0, md->base + MDIOC_REG); reg = MDIOC_RD_MASK | (phy_id << MDIOC_PHYID_SHIFT) | (loc << MDIOC_REG_SHIFT); if (md->ext_phy) reg |= MDIOC_EXT_MASK; __raw_writel(reg, md->base + MDIOC_REG); udelay(50); ret = __raw_readw(md->base + MDIOD_REG); return ret; } static int bcm6368_mdiomux_write(struct mii_bus *bus, int phy_id, int loc, uint16_t val) { struct bcm6368_mdiomux_desc *md = bus->priv; uint32_t reg; __raw_writel(0, md->base + MDIOC_REG); reg = MDIOC_WR_MASK | (phy_id << MDIOC_PHYID_SHIFT) | (loc << MDIOC_REG_SHIFT); if (md->ext_phy) reg |= MDIOC_EXT_MASK; reg |= val; __raw_writel(reg, md->base + MDIOC_REG); udelay(50); return 0; } static int bcm6368_mdiomux_switch_fn(int current_child, int desired_child, void *data) { struct bcm6368_mdiomux_desc *md = data; md->ext_phy = desired_child; return 0; } static int bcm6368_mdiomux_probe(struct platform_device *pdev) { struct bcm6368_mdiomux_desc *md; struct mii_bus *bus; struct resource *res; int rc; md = devm_kzalloc(&pdev->dev, sizeof(*md), GFP_KERNEL); if (!md) return -ENOMEM; md->dev = &pdev->dev; res = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (!res) return -EINVAL; /* * Just ioremap, as this MDIO block is usually integrated into an * Ethernet MAC controller register range */ md->base = devm_ioremap(&pdev->dev, res->start, resource_size(res)); if (!md->base) { dev_err(&pdev->dev, "failed to ioremap register\n"); return -ENOMEM; } md->mii_bus = devm_mdiobus_alloc(&pdev->dev); if (!md->mii_bus) { dev_err(&pdev->dev, "mdiomux bus alloc failed\n"); return -ENOMEM; } bus = md->mii_bus; bus->priv = md; bus->name = "BCM6368 MDIO mux bus"; snprintf(bus->id, MII_BUS_ID_SIZE, "%s-%d", pdev->name, pdev->id); bus->parent = &pdev->dev; bus->read = bcm6368_mdiomux_read; bus->write = bcm6368_mdiomux_write; bus->phy_mask = 0x3f; bus->dev.of_node = pdev->dev.of_node; rc = mdiobus_register(bus); if (rc) { dev_err(&pdev->dev, "mdiomux registration failed\n"); return rc; } platform_set_drvdata(pdev, md); rc = mdio_mux_init(md->dev, md->dev->of_node, bcm6368_mdiomux_switch_fn, &md->mux_handle, md, md->mii_bus); if (rc) { dev_info(md->dev, "mdiomux initialization failed\n"); goto out_register; } dev_info(&pdev->dev, "Broadcom BCM6368 MDIO mux bus\n"); return 0; out_register: mdiobus_unregister(bus); return rc; } static int bcm6368_mdiomux_remove(struct platform_device *pdev) { struct bcm6368_mdiomux_desc *md = platform_get_drvdata(pdev); mdio_mux_uninit(md->mux_handle); mdiobus_unregister(md->mii_bus); return 0; } static const struct of_device_id bcm6368_mdiomux_ids[] = { { .compatible = "brcm,bcm6368-mdio-mux", }, { /* sentinel */ } }; 
MODULE_DEVICE_TABLE(of, bcm6368_mdiomux_ids);

static struct platform_driver bcm6368_mdiomux_driver = {
	.driver = {
		.name = "bcm6368-mdio-mux",
		.of_match_table = bcm6368_mdiomux_ids,
	},
	.probe = bcm6368_mdiomux_probe,
	.remove = bcm6368_mdiomux_remove,
};
module_platform_driver(bcm6368_mdiomux_driver);

MODULE_AUTHOR("Álvaro Fernández Rojas <[email protected]>");
MODULE_DESCRIPTION("BCM6368 mdiomux bus controller driver");
MODULE_LICENSE("GPL v2");
linux-master
drivers/net/mdio/mdio-mux-bcm6368.c
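The bcm6368 driver above is one instance of the generic mdio-mux pattern: register a parent mii_bus, then pass it to mdio_mux_init() together with a callback that steers the hardware toward the requested child bus. A stripped-down sketch of only that pattern, with an invented child-select register standing in for real hardware; the demo_* names are placeholders, while the mdio_mux_init() call mirrors the one above.

#include <linux/io.h>
#include <linux/mdio-mux.h>
#include <linux/of.h>
#include <linux/phy.h>

struct demo_mdio_mux {
	void __iomem *select_reg;	/* invented child-select register */
	void *mux_handle;
};

/* Called by the mdio-mux core before an access goes to a child bus. */
static int demo_mdio_mux_switch_fn(int current_child, int desired_child,
				   void *data)
{
	struct demo_mdio_mux *mux = data;

	if (current_child != desired_child)
		writel(desired_child, mux->select_reg);

	return 0;
}

/* parent_bus must already be registered; np is the mux node whose
 * children describe the downstream buses.
 */
static int demo_mdio_mux_attach(struct device *dev, struct device_node *np,
				struct demo_mdio_mux *mux,
				struct mii_bus *parent_bus)
{
	return mdio_mux_init(dev, np, demo_mdio_mux_switch_fn,
			     &mux->mux_handle, mux, parent_bus);
}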
// SPDX-License-Identifier: GPL-2.0-or-later /* Driver for MMIO-Mapped MDIO devices. Some IPs expose internal PHYs or PCS * within the MMIO-mapped area * * Copyright (C) 2023 Maxime Chevallier <[email protected]> */ #include <linux/bitfield.h> #include <linux/delay.h> #include <linux/mdio.h> #include <linux/module.h> #include <linux/of.h> #include <linux/of_mdio.h> #include <linux/phy.h> #include <linux/platform_device.h> #include <linux/regmap.h> #include <linux/mdio/mdio-regmap.h> #define DRV_NAME "mdio-regmap" struct mdio_regmap_priv { struct regmap *regmap; u8 valid_addr; }; static int mdio_regmap_read_c22(struct mii_bus *bus, int addr, int regnum) { struct mdio_regmap_priv *ctx = bus->priv; unsigned int val; int ret; if (ctx->valid_addr != addr) return -ENODEV; ret = regmap_read(ctx->regmap, regnum, &val); if (ret < 0) return ret; return val; } static int mdio_regmap_write_c22(struct mii_bus *bus, int addr, int regnum, u16 val) { struct mdio_regmap_priv *ctx = bus->priv; if (ctx->valid_addr != addr) return -ENODEV; return regmap_write(ctx->regmap, regnum, val); } struct mii_bus *devm_mdio_regmap_register(struct device *dev, const struct mdio_regmap_config *config) { struct mdio_regmap_priv *mr; struct mii_bus *mii; int rc; if (!config->parent) return ERR_PTR(-EINVAL); mii = devm_mdiobus_alloc_size(config->parent, sizeof(*mr)); if (!mii) return ERR_PTR(-ENOMEM); mr = mii->priv; mr->regmap = config->regmap; mr->valid_addr = config->valid_addr; mii->name = DRV_NAME; strscpy(mii->id, config->name, MII_BUS_ID_SIZE); mii->parent = config->parent; mii->read = mdio_regmap_read_c22; mii->write = mdio_regmap_write_c22; if (config->autoscan) mii->phy_mask = ~BIT(config->valid_addr); else mii->phy_mask = ~0; rc = devm_mdiobus_register(dev, mii); if (rc) { dev_err(config->parent, "Cannot register MDIO bus![%s] (%d)\n", mii->id, rc); return ERR_PTR(rc); } return mii; } EXPORT_SYMBOL_GPL(devm_mdio_regmap_register); MODULE_DESCRIPTION("MDIO API over regmap"); MODULE_AUTHOR("Maxime Chevallier <[email protected]>"); MODULE_LICENSE("GPL");
linux-master
drivers/net/mdio/mdio-regmap.c
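devm_mdio_regmap_register() is meant for IPs whose internal PHY or PCS registers already sit behind a regmap. A hedged caller sketch, assuming the surrounding driver owns such a regmap with the PHY answering at address 0; the config fields (regmap, parent, valid_addr, autoscan, name) are the ones used above, and name is treated here as a MII_BUS_ID_SIZE buffer.

#include <linux/device.h>
#include <linux/mdio/mdio-regmap.h>
#include <linux/phy.h>
#include <linux/regmap.h>
#include <linux/string.h>

static struct mii_bus *demo_register_internal_phy_bus(struct device *dev,
						      struct regmap *phy_regmap)
{
	struct mdio_regmap_config cfg = { };

	cfg.regmap = phy_regmap;	/* MII regnum maps directly to a regmap register */
	cfg.parent = dev;
	cfg.valid_addr = 0;		/* only address 0 responds */
	cfg.autoscan = true;		/* let phylib probe that single address */
	strscpy(cfg.name, dev_name(dev), sizeof(cfg.name));

	/* Returns a registered mii_bus or an ERR_PTR; unregistration is
	 * device-managed, so no explicit cleanup is needed here.
	 */
	return devm_mdio_regmap_register(dev, &cfg);
}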
// SPDX-License-Identifier: GPL-2.0 /* * Copyright (C) 2009-2016 Cavium, Inc. */ #include <linux/delay.h> #include <linux/io.h> #include <linux/module.h> #include <linux/phy.h> #include "mdio-cavium.h" static void cavium_mdiobus_set_mode(struct cavium_mdiobus *p, enum cavium_mdiobus_mode m) { union cvmx_smix_clk smi_clk; if (m == p->mode) return; smi_clk.u64 = oct_mdio_readq(p->register_base + SMI_CLK); smi_clk.s.mode = (m == C45) ? 1 : 0; smi_clk.s.preamble = 1; oct_mdio_writeq(smi_clk.u64, p->register_base + SMI_CLK); p->mode = m; } static int cavium_mdiobus_c45_addr(struct cavium_mdiobus *p, int phy_id, int devad, int regnum) { union cvmx_smix_cmd smi_cmd; union cvmx_smix_wr_dat smi_wr; int timeout = 1000; cavium_mdiobus_set_mode(p, C45); smi_wr.u64 = 0; smi_wr.s.dat = regnum & 0xffff; oct_mdio_writeq(smi_wr.u64, p->register_base + SMI_WR_DAT); smi_cmd.u64 = 0; smi_cmd.s.phy_op = 0; /* MDIO_CLAUSE_45_ADDRESS */ smi_cmd.s.phy_adr = phy_id; smi_cmd.s.reg_adr = devad; oct_mdio_writeq(smi_cmd.u64, p->register_base + SMI_CMD); do { /* Wait 1000 clocks so we don't saturate the RSL bus * doing reads. */ __delay(1000); smi_wr.u64 = oct_mdio_readq(p->register_base + SMI_WR_DAT); } while (smi_wr.s.pending && --timeout); if (timeout <= 0) return -EIO; return 0; } int cavium_mdiobus_read_c22(struct mii_bus *bus, int phy_id, int regnum) { struct cavium_mdiobus *p = bus->priv; union cvmx_smix_cmd smi_cmd; union cvmx_smix_rd_dat smi_rd; int timeout = 1000; cavium_mdiobus_set_mode(p, C22); smi_cmd.u64 = 0; smi_cmd.s.phy_op = 1; /* MDIO_CLAUSE_22_READ */ smi_cmd.s.phy_adr = phy_id; smi_cmd.s.reg_adr = regnum; oct_mdio_writeq(smi_cmd.u64, p->register_base + SMI_CMD); do { /* Wait 1000 clocks so we don't saturate the RSL bus * doing reads. */ __delay(1000); smi_rd.u64 = oct_mdio_readq(p->register_base + SMI_RD_DAT); } while (smi_rd.s.pending && --timeout); if (smi_rd.s.val) return smi_rd.s.dat; else return -EIO; } EXPORT_SYMBOL(cavium_mdiobus_read_c22); int cavium_mdiobus_read_c45(struct mii_bus *bus, int phy_id, int devad, int regnum) { struct cavium_mdiobus *p = bus->priv; union cvmx_smix_cmd smi_cmd; union cvmx_smix_rd_dat smi_rd; int timeout = 1000; int r; r = cavium_mdiobus_c45_addr(p, phy_id, devad, regnum); if (r < 0) return r; smi_cmd.u64 = 0; smi_cmd.s.phy_op = 3; /* MDIO_CLAUSE_45_READ */ smi_cmd.s.phy_adr = phy_id; smi_cmd.s.reg_adr = regnum; oct_mdio_writeq(smi_cmd.u64, p->register_base + SMI_CMD); do { /* Wait 1000 clocks so we don't saturate the RSL bus * doing reads. */ __delay(1000); smi_rd.u64 = oct_mdio_readq(p->register_base + SMI_RD_DAT); } while (smi_rd.s.pending && --timeout); if (smi_rd.s.val) return smi_rd.s.dat; else return -EIO; } EXPORT_SYMBOL(cavium_mdiobus_read_c45); int cavium_mdiobus_write_c22(struct mii_bus *bus, int phy_id, int regnum, u16 val) { struct cavium_mdiobus *p = bus->priv; union cvmx_smix_cmd smi_cmd; union cvmx_smix_wr_dat smi_wr; int timeout = 1000; cavium_mdiobus_set_mode(p, C22); smi_wr.u64 = 0; smi_wr.s.dat = val; oct_mdio_writeq(smi_wr.u64, p->register_base + SMI_WR_DAT); smi_cmd.u64 = 0; smi_cmd.s.phy_op = 0; /* MDIO_CLAUSE_22_WRITE */ smi_cmd.s.phy_adr = phy_id; smi_cmd.s.reg_adr = regnum; oct_mdio_writeq(smi_cmd.u64, p->register_base + SMI_CMD); do { /* Wait 1000 clocks so we don't saturate the RSL bus * doing reads. 
*/ __delay(1000); smi_wr.u64 = oct_mdio_readq(p->register_base + SMI_WR_DAT); } while (smi_wr.s.pending && --timeout); if (timeout <= 0) return -EIO; return 0; } EXPORT_SYMBOL(cavium_mdiobus_write_c22); int cavium_mdiobus_write_c45(struct mii_bus *bus, int phy_id, int devad, int regnum, u16 val) { struct cavium_mdiobus *p = bus->priv; union cvmx_smix_cmd smi_cmd; union cvmx_smix_wr_dat smi_wr; int timeout = 1000; int r; r = cavium_mdiobus_c45_addr(p, phy_id, devad, regnum); if (r < 0) return r; smi_wr.u64 = 0; smi_wr.s.dat = val; oct_mdio_writeq(smi_wr.u64, p->register_base + SMI_WR_DAT); smi_cmd.u64 = 0; smi_cmd.s.phy_op = 1; /* MDIO_CLAUSE_45_WRITE */ smi_cmd.s.phy_adr = phy_id; smi_cmd.s.reg_adr = devad; oct_mdio_writeq(smi_cmd.u64, p->register_base + SMI_CMD); do { /* Wait 1000 clocks so we don't saturate the RSL bus * doing reads. */ __delay(1000); smi_wr.u64 = oct_mdio_readq(p->register_base + SMI_WR_DAT); } while (smi_wr.s.pending && --timeout); if (timeout <= 0) return -EIO; return 0; } EXPORT_SYMBOL(cavium_mdiobus_write_c45); MODULE_DESCRIPTION("Common code for OCTEON and Thunder MDIO bus drivers"); MODULE_AUTHOR("David Daney"); MODULE_LICENSE("GPL v2");
linux-master
drivers/net/mdio/mdio-cavium.c
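The Cavium helpers separate Clause 22 from Clause 45 accesses, with C45 needing an extra address cycle before the read or write. On the consumer side the split shows up as distinct mii_bus accessors; a short sketch using the generic mdiobus helpers, where the bus pointer and PHY address are placeholders.

#include <linux/mdio.h>
#include <linux/phy.h>

/* Read the Clause 22 BMSR and the Clause 45 PMA/PMD status register
 * from the same PHY address. Both helpers lock the bus internally and
 * return the 16-bit value or a negative errno.
 */
static int demo_dump_phy_status(struct mii_bus *bus, int addr)
{
	int bmsr, pma_stat;

	bmsr = mdiobus_read(bus, addr, MII_BMSR);
	if (bmsr < 0)
		return bmsr;

	pma_stat = mdiobus_c45_read(bus, addr, MDIO_MMD_PMAPMD, MDIO_STAT1);
	if (pma_stat < 0)
		return pma_stat;

	pr_info("BMSR=%04x, PMA/PMD STAT1=%04x\n", bmsr, pma_stat);
	return 0;
}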
// SPDX-License-Identifier: GPL-2.0+ /* * Hisilicon Fast Ethernet MDIO Bus Driver * * Copyright (c) 2016 HiSilicon Technologies Co., Ltd. */ #include <linux/clk.h> #include <linux/iopoll.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/of_address.h> #include <linux/of_mdio.h> #include <linux/platform_device.h> #define MDIO_RWCTRL 0x00 #define MDIO_RO_DATA 0x04 #define MDIO_WRITE BIT(13) #define MDIO_RW_FINISH BIT(15) #define BIT_PHY_ADDR_OFFSET 8 #define BIT_WR_DATA_OFFSET 16 struct hisi_femac_mdio_data { struct clk *clk; void __iomem *membase; }; static int hisi_femac_mdio_wait_ready(struct hisi_femac_mdio_data *data) { u32 val; return readl_poll_timeout(data->membase + MDIO_RWCTRL, val, val & MDIO_RW_FINISH, 20, 10000); } static int hisi_femac_mdio_read(struct mii_bus *bus, int mii_id, int regnum) { struct hisi_femac_mdio_data *data = bus->priv; int ret; ret = hisi_femac_mdio_wait_ready(data); if (ret) return ret; writel((mii_id << BIT_PHY_ADDR_OFFSET) | regnum, data->membase + MDIO_RWCTRL); ret = hisi_femac_mdio_wait_ready(data); if (ret) return ret; return readl(data->membase + MDIO_RO_DATA) & 0xFFFF; } static int hisi_femac_mdio_write(struct mii_bus *bus, int mii_id, int regnum, u16 value) { struct hisi_femac_mdio_data *data = bus->priv; int ret; ret = hisi_femac_mdio_wait_ready(data); if (ret) return ret; writel(MDIO_WRITE | (value << BIT_WR_DATA_OFFSET) | (mii_id << BIT_PHY_ADDR_OFFSET) | regnum, data->membase + MDIO_RWCTRL); return hisi_femac_mdio_wait_ready(data); } static int hisi_femac_mdio_probe(struct platform_device *pdev) { struct device_node *np = pdev->dev.of_node; struct mii_bus *bus; struct hisi_femac_mdio_data *data; int ret; bus = mdiobus_alloc_size(sizeof(*data)); if (!bus) return -ENOMEM; bus->name = "hisi_femac_mii_bus"; bus->read = &hisi_femac_mdio_read; bus->write = &hisi_femac_mdio_write; snprintf(bus->id, MII_BUS_ID_SIZE, "%s", pdev->name); bus->parent = &pdev->dev; data = bus->priv; data->membase = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(data->membase)) { ret = PTR_ERR(data->membase); goto err_out_free_mdiobus; } data->clk = devm_clk_get(&pdev->dev, NULL); if (IS_ERR(data->clk)) { ret = PTR_ERR(data->clk); goto err_out_free_mdiobus; } ret = clk_prepare_enable(data->clk); if (ret) goto err_out_free_mdiobus; ret = of_mdiobus_register(bus, np); if (ret) goto err_out_disable_clk; platform_set_drvdata(pdev, bus); return 0; err_out_disable_clk: clk_disable_unprepare(data->clk); err_out_free_mdiobus: mdiobus_free(bus); return ret; } static int hisi_femac_mdio_remove(struct platform_device *pdev) { struct mii_bus *bus = platform_get_drvdata(pdev); struct hisi_femac_mdio_data *data = bus->priv; mdiobus_unregister(bus); clk_disable_unprepare(data->clk); mdiobus_free(bus); return 0; } static const struct of_device_id hisi_femac_mdio_dt_ids[] = { { .compatible = "hisilicon,hisi-femac-mdio" }, { } }; MODULE_DEVICE_TABLE(of, hisi_femac_mdio_dt_ids); static struct platform_driver hisi_femac_mdio_driver = { .probe = hisi_femac_mdio_probe, .remove = hisi_femac_mdio_remove, .driver = { .name = "hisi-femac-mdio", .of_match_table = hisi_femac_mdio_dt_ids, }, }; module_platform_driver(hisi_femac_mdio_driver); MODULE_DESCRIPTION("Hisilicon Fast Ethernet MAC MDIO interface driver"); MODULE_AUTHOR("Dongpo Li <[email protected]>"); MODULE_LICENSE("GPL");
linux-master
drivers/net/mdio/mdio-hisi-femac.c
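hisi_femac_mdio_wait_ready() is the stock readl_poll_timeout() idiom: poll a status register until a completion bit changes or a timeout elapses. A generic sketch of the same pattern against an invented BUSY bit follows.

#include <linux/bits.h>
#include <linux/io.h>
#include <linux/iopoll.h>

#define DEMO_STATUS_REG		0x08	/* invented status register */
#define DEMO_STATUS_BUSY	BIT(0)

/* Poll every 20us, give up after 10ms; returns 0 or -ETIMEDOUT, exactly
 * like the wait-ready helper above.
 */
static int demo_wait_idle(void __iomem *base)
{
	u32 val;

	return readl_poll_timeout(base + DEMO_STATUS_REG, val,
				  !(val & DEMO_STATUS_BUSY), 20, 10000);
}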
// SPDX-License-Identifier: GPL-2.0 /* * MDIO I2C bridge * * Copyright (C) 2015-2016 Russell King * Copyright (C) 2021 Marek Behun * * Network PHYs can appear on I2C buses when they are part of SFP module. * This driver exposes these PHYs to the networking PHY code, allowing * our PHY drivers access to these PHYs, and so allowing configuration * of their settings. */ #include <linux/i2c.h> #include <linux/mdio/mdio-i2c.h> #include <linux/phy.h> #include <linux/sfp.h> /* * I2C bus addresses 0x50 and 0x51 are normally an EEPROM, which is * specified to be present in SFP modules. These correspond with PHY * addresses 16 and 17. Disallow access to these "phy" addresses. */ static bool i2c_mii_valid_phy_id(int phy_id) { return phy_id != 0x10 && phy_id != 0x11; } static unsigned int i2c_mii_phy_addr(int phy_id) { return phy_id + 0x40; } static int i2c_mii_read_default_c45(struct mii_bus *bus, int phy_id, int devad, int reg) { struct i2c_adapter *i2c = bus->priv; struct i2c_msg msgs[2]; u8 addr[3], data[2], *p; int bus_addr, ret; if (!i2c_mii_valid_phy_id(phy_id)) return 0xffff; p = addr; if (devad >= 0) { *p++ = 0x20 | devad; *p++ = reg >> 8; } *p++ = reg; bus_addr = i2c_mii_phy_addr(phy_id); msgs[0].addr = bus_addr; msgs[0].flags = 0; msgs[0].len = p - addr; msgs[0].buf = addr; msgs[1].addr = bus_addr; msgs[1].flags = I2C_M_RD; msgs[1].len = sizeof(data); msgs[1].buf = data; ret = i2c_transfer(i2c, msgs, ARRAY_SIZE(msgs)); if (ret != ARRAY_SIZE(msgs)) return 0xffff; return data[0] << 8 | data[1]; } static int i2c_mii_write_default_c45(struct mii_bus *bus, int phy_id, int devad, int reg, u16 val) { struct i2c_adapter *i2c = bus->priv; struct i2c_msg msg; int ret; u8 data[5], *p; if (!i2c_mii_valid_phy_id(phy_id)) return 0; p = data; if (devad >= 0) { *p++ = devad; *p++ = reg >> 8; } *p++ = reg; *p++ = val >> 8; *p++ = val; msg.addr = i2c_mii_phy_addr(phy_id); msg.flags = 0; msg.len = p - data; msg.buf = data; ret = i2c_transfer(i2c, &msg, 1); return ret < 0 ? ret : 0; } static int i2c_mii_read_default_c22(struct mii_bus *bus, int phy_id, int reg) { return i2c_mii_read_default_c45(bus, phy_id, -1, reg); } static int i2c_mii_write_default_c22(struct mii_bus *bus, int phy_id, int reg, u16 val) { return i2c_mii_write_default_c45(bus, phy_id, -1, reg, val); } /* RollBall SFPs do not access internal PHY via I2C address 0x56, but * instead via address 0x51, when SFP page is set to 0x03 and password to * 0xffffffff. 
* * address size contents description * ------- ---- -------- ----------- * 0x80 1 CMD 0x01/0x02/0x04 for write/read/done * 0x81 1 DEV Clause 45 device * 0x82 2 REG Clause 45 register * 0x84 2 VAL Register value */ #define ROLLBALL_PHY_I2C_ADDR 0x51 #define ROLLBALL_PASSWORD (SFP_VSL + 3) #define ROLLBALL_CMD_ADDR 0x80 #define ROLLBALL_DATA_ADDR 0x81 #define ROLLBALL_CMD_WRITE 0x01 #define ROLLBALL_CMD_READ 0x02 #define ROLLBALL_CMD_DONE 0x04 #define SFP_PAGE_ROLLBALL_MDIO 3 static int __i2c_transfer_err(struct i2c_adapter *i2c, struct i2c_msg *msgs, int num) { int ret; ret = __i2c_transfer(i2c, msgs, num); if (ret < 0) return ret; else if (ret != num) return -EIO; else return 0; } static int __i2c_rollball_get_page(struct i2c_adapter *i2c, int bus_addr, u8 *page) { struct i2c_msg msgs[2]; u8 addr = SFP_PAGE; msgs[0].addr = bus_addr; msgs[0].flags = 0; msgs[0].len = 1; msgs[0].buf = &addr; msgs[1].addr = bus_addr; msgs[1].flags = I2C_M_RD; msgs[1].len = 1; msgs[1].buf = page; return __i2c_transfer_err(i2c, msgs, 2); } static int __i2c_rollball_set_page(struct i2c_adapter *i2c, int bus_addr, u8 page) { struct i2c_msg msg; u8 buf[2]; buf[0] = SFP_PAGE; buf[1] = page; msg.addr = bus_addr; msg.flags = 0; msg.len = 2; msg.buf = buf; return __i2c_transfer_err(i2c, &msg, 1); } /* In order to not interfere with other SFP code (which possibly may manipulate * SFP_PAGE), for every transfer we do this: * 1. lock the bus * 2. save content of SFP_PAGE * 3. set SFP_PAGE to 3 * 4. do the transfer * 5. restore original SFP_PAGE * 6. unlock the bus * Note that one might think that steps 2 to 5 could be theoretically done all * in one call to i2c_transfer (by constructing msgs array in such a way), but * unfortunately tests show that this does not work :-( Changed SFP_PAGE does * not take into account until i2c_transfer() is done. */ static int i2c_transfer_rollball(struct i2c_adapter *i2c, struct i2c_msg *msgs, int num) { int ret, main_err = 0; u8 saved_page; i2c_lock_bus(i2c, I2C_LOCK_SEGMENT); /* save original page */ ret = __i2c_rollball_get_page(i2c, msgs->addr, &saved_page); if (ret) goto unlock; /* change to RollBall MDIO page */ ret = __i2c_rollball_set_page(i2c, msgs->addr, SFP_PAGE_ROLLBALL_MDIO); if (ret) goto unlock; /* do the transfer; we try to restore original page if this fails */ ret = __i2c_transfer_err(i2c, msgs, num); if (ret) main_err = ret; /* restore original page */ ret = __i2c_rollball_set_page(i2c, msgs->addr, saved_page); unlock: i2c_unlock_bus(i2c, I2C_LOCK_SEGMENT); return main_err ? : ret; } static int i2c_rollball_mii_poll(struct mii_bus *bus, int bus_addr, u8 *buf, size_t len) { struct i2c_adapter *i2c = bus->priv; struct i2c_msg msgs[2]; u8 cmd_addr, tmp, *res; int i, ret; cmd_addr = ROLLBALL_CMD_ADDR; res = buf ? buf : &tmp; len = buf ? len : 1; msgs[0].addr = bus_addr; msgs[0].flags = 0; msgs[0].len = 1; msgs[0].buf = &cmd_addr; msgs[1].addr = bus_addr; msgs[1].flags = I2C_M_RD; msgs[1].len = len; msgs[1].buf = res; /* By experiment it takes up to 70 ms to access a register for these * SFPs. Sleep 20ms between iterations and try 10 times. 
*/ i = 10; do { msleep(20); ret = i2c_transfer_rollball(i2c, msgs, ARRAY_SIZE(msgs)); if (ret) return ret; if (*res == ROLLBALL_CMD_DONE) return 0; } while (i-- > 0); dev_dbg(&bus->dev, "poll timed out\n"); return -ETIMEDOUT; } static int i2c_rollball_mii_cmd(struct mii_bus *bus, int bus_addr, u8 cmd, u8 *data, size_t len) { struct i2c_adapter *i2c = bus->priv; struct i2c_msg msgs[2]; u8 cmdbuf[2]; cmdbuf[0] = ROLLBALL_CMD_ADDR; cmdbuf[1] = cmd; msgs[0].addr = bus_addr; msgs[0].flags = 0; msgs[0].len = len; msgs[0].buf = data; msgs[1].addr = bus_addr; msgs[1].flags = 0; msgs[1].len = sizeof(cmdbuf); msgs[1].buf = cmdbuf; return i2c_transfer_rollball(i2c, msgs, ARRAY_SIZE(msgs)); } static int i2c_mii_read_rollball(struct mii_bus *bus, int phy_id, int devad, int reg) { u8 buf[4], res[6]; int bus_addr, ret; u16 val; bus_addr = i2c_mii_phy_addr(phy_id); if (bus_addr != ROLLBALL_PHY_I2C_ADDR) return 0xffff; buf[0] = ROLLBALL_DATA_ADDR; buf[1] = devad; buf[2] = (reg >> 8) & 0xff; buf[3] = reg & 0xff; ret = i2c_rollball_mii_cmd(bus, bus_addr, ROLLBALL_CMD_READ, buf, sizeof(buf)); if (ret < 0) return ret; ret = i2c_rollball_mii_poll(bus, bus_addr, res, sizeof(res)); if (ret == -ETIMEDOUT) return 0xffff; else if (ret < 0) return ret; val = res[4] << 8 | res[5]; return val; } static int i2c_mii_write_rollball(struct mii_bus *bus, int phy_id, int devad, int reg, u16 val) { int bus_addr, ret; u8 buf[6]; bus_addr = i2c_mii_phy_addr(phy_id); if (bus_addr != ROLLBALL_PHY_I2C_ADDR) return 0; buf[0] = ROLLBALL_DATA_ADDR; buf[1] = devad; buf[2] = (reg >> 8) & 0xff; buf[3] = reg & 0xff; buf[4] = val >> 8; buf[5] = val & 0xff; ret = i2c_rollball_mii_cmd(bus, bus_addr, ROLLBALL_CMD_WRITE, buf, sizeof(buf)); if (ret < 0) return ret; ret = i2c_rollball_mii_poll(bus, bus_addr, NULL, 0); if (ret < 0) return ret; return 0; } static int i2c_mii_init_rollball(struct i2c_adapter *i2c) { struct i2c_msg msg; u8 pw[5]; int ret; pw[0] = ROLLBALL_PASSWORD; pw[1] = 0xff; pw[2] = 0xff; pw[3] = 0xff; pw[4] = 0xff; msg.addr = ROLLBALL_PHY_I2C_ADDR; msg.flags = 0; msg.len = sizeof(pw); msg.buf = pw; ret = i2c_transfer(i2c, &msg, 1); if (ret < 0) return ret; else if (ret != 1) return -EIO; else return 0; } struct mii_bus *mdio_i2c_alloc(struct device *parent, struct i2c_adapter *i2c, enum mdio_i2c_proto protocol) { struct mii_bus *mii; int ret; if (!i2c_check_functionality(i2c, I2C_FUNC_I2C)) return ERR_PTR(-EINVAL); mii = mdiobus_alloc(); if (!mii) return ERR_PTR(-ENOMEM); snprintf(mii->id, MII_BUS_ID_SIZE, "i2c:%s", dev_name(parent)); mii->parent = parent; mii->priv = i2c; switch (protocol) { case MDIO_I2C_ROLLBALL: ret = i2c_mii_init_rollball(i2c); if (ret < 0) { dev_err(parent, "Cannot initialize RollBall MDIO I2C protocol: %d\n", ret); mdiobus_free(mii); return ERR_PTR(ret); } mii->read_c45 = i2c_mii_read_rollball; mii->write_c45 = i2c_mii_write_rollball; break; default: mii->read = i2c_mii_read_default_c22; mii->write = i2c_mii_write_default_c22; mii->read_c45 = i2c_mii_read_default_c45; mii->write_c45 = i2c_mii_write_default_c45; break; } return mii; } EXPORT_SYMBOL_GPL(mdio_i2c_alloc); MODULE_AUTHOR("Russell King"); MODULE_DESCRIPTION("MDIO I2C bridge library"); MODULE_LICENSE("GPL v2");
linux-master
drivers/net/mdio/mdio-i2c.c
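mdio_i2c_alloc() only allocates and wires up the bus; registration and teardown stay with the caller (the SFP code, in mainline). A hedged sketch of that caller side, taking the protocol as a parameter so no enum constants beyond those visible above are assumed; the demo_* names are invented.

#include <linux/bits.h>
#include <linux/err.h>
#include <linux/i2c.h>
#include <linux/mdio/mdio-i2c.h>
#include <linux/phy.h>

static struct mii_bus *demo_sfp_mdio_register(struct device *dev,
					      struct i2c_adapter *i2c,
					      enum mdio_i2c_proto protocol)
{
	struct mii_bus *bus;
	int ret;

	bus = mdio_i2c_alloc(dev, i2c, protocol);
	if (IS_ERR(bus))
		return bus;

	bus->name = "demo SFP I2C MDIO";
	/* Addresses 16 and 17 shadow the module EEPROM and are rejected by
	 * the bridge anyway, so skip probing them.
	 */
	bus->phy_mask = BIT(16) | BIT(17);

	ret = mdiobus_register(bus);
	if (ret) {
		mdiobus_free(bus);
		return ERR_PTR(ret);
	}

	return bus;
}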
// SPDX-License-Identifier: GPL-2.0 #include <linux/kernel.h> #include <linux/module.h> #include <linux/of_mdio.h> #include <linux/phy.h> #include <linux/usb.h> #define USB_MARVELL_VID 0x1286 static const struct usb_device_id mvusb_mdio_table[] = { { USB_DEVICE(USB_MARVELL_VID, 0x1fa4) }, {} }; MODULE_DEVICE_TABLE(usb, mvusb_mdio_table); enum { MVUSB_CMD_PREAMBLE0, MVUSB_CMD_PREAMBLE1, MVUSB_CMD_ADDR, MVUSB_CMD_VAL, }; struct mvusb_mdio { struct usb_device *udev; struct mii_bus *mdio; __le16 buf[4]; }; static int mvusb_mdio_read(struct mii_bus *mdio, int dev, int reg) { struct mvusb_mdio *mvusb = mdio->priv; int err, alen; mvusb->buf[MVUSB_CMD_ADDR] = cpu_to_le16(0xa400 | (dev << 5) | reg); err = usb_bulk_msg(mvusb->udev, usb_sndbulkpipe(mvusb->udev, 2), mvusb->buf, 6, &alen, 100); if (err) return err; err = usb_bulk_msg(mvusb->udev, usb_rcvbulkpipe(mvusb->udev, 6), &mvusb->buf[MVUSB_CMD_VAL], 2, &alen, 100); if (err) return err; return le16_to_cpu(mvusb->buf[MVUSB_CMD_VAL]); } static int mvusb_mdio_write(struct mii_bus *mdio, int dev, int reg, u16 val) { struct mvusb_mdio *mvusb = mdio->priv; int alen; mvusb->buf[MVUSB_CMD_ADDR] = cpu_to_le16(0x8000 | (dev << 5) | reg); mvusb->buf[MVUSB_CMD_VAL] = cpu_to_le16(val); return usb_bulk_msg(mvusb->udev, usb_sndbulkpipe(mvusb->udev, 2), mvusb->buf, 8, &alen, 100); } static int mvusb_mdio_probe(struct usb_interface *interface, const struct usb_device_id *id) { struct device *dev = &interface->dev; struct mvusb_mdio *mvusb; struct mii_bus *mdio; int ret; mdio = devm_mdiobus_alloc_size(dev, sizeof(*mvusb)); if (!mdio) return -ENOMEM; mvusb = mdio->priv; mvusb->mdio = mdio; mvusb->udev = usb_get_dev(interface_to_usbdev(interface)); /* Reversed from USB PCAPs, no idea what these mean. */ mvusb->buf[MVUSB_CMD_PREAMBLE0] = cpu_to_le16(0xe800); mvusb->buf[MVUSB_CMD_PREAMBLE1] = cpu_to_le16(0x0001); snprintf(mdio->id, MII_BUS_ID_SIZE, "mvusb-%s", dev_name(dev)); mdio->name = mdio->id; mdio->parent = dev; mdio->read = mvusb_mdio_read; mdio->write = mvusb_mdio_write; usb_set_intfdata(interface, mvusb); ret = of_mdiobus_register(mdio, dev->of_node); if (ret) goto put_dev; return 0; put_dev: usb_put_dev(mvusb->udev); return ret; } static void mvusb_mdio_disconnect(struct usb_interface *interface) { struct mvusb_mdio *mvusb = usb_get_intfdata(interface); struct usb_device *udev = mvusb->udev; mdiobus_unregister(mvusb->mdio); usb_set_intfdata(interface, NULL); usb_put_dev(udev); } static struct usb_driver mvusb_mdio_driver = { .name = "mvusb_mdio", .id_table = mvusb_mdio_table, .probe = mvusb_mdio_probe, .disconnect = mvusb_mdio_disconnect, }; module_usb_driver(mvusb_mdio_driver); MODULE_AUTHOR("Tobias Waldekranz <[email protected]>"); MODULE_DESCRIPTION("Marvell USB MDIO Adapter"); MODULE_LICENSE("GPL");
linux-master
drivers/net/mdio/mdio-mvusb.c
// SPDX-License-Identifier: GPL-2.0 /* * Copyright (C) 2009-2016 Cavium, Inc. */ #include <linux/acpi.h> #include <linux/gfp.h> #include <linux/io.h> #include <linux/module.h> #include <linux/of_address.h> #include <linux/of_mdio.h> #include <linux/pci.h> #include <linux/phy.h> #include "mdio-cavium.h" struct thunder_mdiobus_nexus { void __iomem *bar0; struct cavium_mdiobus *buses[4]; }; static int thunder_mdiobus_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) { struct device_node *node; struct fwnode_handle *fwn; struct thunder_mdiobus_nexus *nexus; int err; int i; nexus = devm_kzalloc(&pdev->dev, sizeof(*nexus), GFP_KERNEL); if (!nexus) return -ENOMEM; pci_set_drvdata(pdev, nexus); err = pcim_enable_device(pdev); if (err) { dev_err(&pdev->dev, "Failed to enable PCI device\n"); pci_set_drvdata(pdev, NULL); return err; } err = pci_request_regions(pdev, KBUILD_MODNAME); if (err) { dev_err(&pdev->dev, "pci_request_regions failed\n"); goto err_disable_device; } nexus->bar0 = pcim_iomap(pdev, 0, pci_resource_len(pdev, 0)); if (!nexus->bar0) { err = -ENOMEM; goto err_release_regions; } i = 0; device_for_each_child_node(&pdev->dev, fwn) { struct resource r; struct mii_bus *mii_bus; struct cavium_mdiobus *bus; union cvmx_smix_en smi_en; /* If it is not an OF node we cannot handle it yet, so * exit the loop. */ node = to_of_node(fwn); if (!node) break; err = of_address_to_resource(node, 0, &r); if (err) { dev_err(&pdev->dev, "Couldn't translate address for \"%pOFn\"\n", node); break; } mii_bus = devm_mdiobus_alloc_size(&pdev->dev, sizeof(*bus)); if (!mii_bus) break; bus = mii_bus->priv; bus->mii_bus = mii_bus; nexus->buses[i] = bus; i++; bus->register_base = nexus->bar0 + r.start - pci_resource_start(pdev, 0); smi_en.u64 = 0; smi_en.s.en = 1; oct_mdio_writeq(smi_en.u64, bus->register_base + SMI_EN); bus->mii_bus->name = KBUILD_MODNAME; snprintf(bus->mii_bus->id, MII_BUS_ID_SIZE, "%llx", r.start); bus->mii_bus->parent = &pdev->dev; bus->mii_bus->read = cavium_mdiobus_read_c22; bus->mii_bus->write = cavium_mdiobus_write_c22; bus->mii_bus->read_c45 = cavium_mdiobus_read_c45; bus->mii_bus->write_c45 = cavium_mdiobus_write_c45; err = of_mdiobus_register(bus->mii_bus, node); if (err) dev_err(&pdev->dev, "of_mdiobus_register failed\n"); dev_info(&pdev->dev, "Added bus at %llx\n", r.start); if (i >= ARRAY_SIZE(nexus->buses)) break; } fwnode_handle_put(fwn); return 0; err_release_regions: pci_release_regions(pdev); err_disable_device: pci_set_drvdata(pdev, NULL); return err; } static void thunder_mdiobus_pci_remove(struct pci_dev *pdev) { int i; struct thunder_mdiobus_nexus *nexus = pci_get_drvdata(pdev); for (i = 0; i < ARRAY_SIZE(nexus->buses); i++) { struct cavium_mdiobus *bus = nexus->buses[i]; if (!bus) continue; mdiobus_unregister(bus->mii_bus); oct_mdio_writeq(0, bus->register_base + SMI_EN); } pci_release_regions(pdev); pci_set_drvdata(pdev, NULL); } static const struct pci_device_id thunder_mdiobus_id_table[] = { { PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, 0xa02b) }, { 0, } /* End of table. */ }; MODULE_DEVICE_TABLE(pci, thunder_mdiobus_id_table); static struct pci_driver thunder_mdiobus_driver = { .name = KBUILD_MODNAME, .id_table = thunder_mdiobus_id_table, .probe = thunder_mdiobus_pci_probe, .remove = thunder_mdiobus_pci_remove, }; module_pci_driver(thunder_mdiobus_driver); MODULE_DESCRIPTION("Cavium ThunderX MDIO bus driver"); MODULE_LICENSE("GPL v2");
linux-master
drivers/net/mdio/mdio-thunder.c
// SPDX-License-Identifier: GPL-2.0 /* Qualcomm IPQ8064 MDIO interface driver * * Copyright (C) 2019 Christian Lamparter <[email protected]> * Copyright (C) 2020 Ansuel Smith <[email protected]> */ #include <linux/delay.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/of_mdio.h> #include <linux/of_address.h> #include <linux/platform_device.h> #include <linux/regmap.h> /* MII address register definitions */ #define MII_ADDR_REG_ADDR 0x10 #define MII_BUSY BIT(0) #define MII_WRITE BIT(1) #define MII_CLKRANGE(x) ((x) << 2) #define MII_CLKRANGE_60_100M MII_CLKRANGE(0) #define MII_CLKRANGE_100_150M MII_CLKRANGE(1) #define MII_CLKRANGE_20_35M MII_CLKRANGE(2) #define MII_CLKRANGE_35_60M MII_CLKRANGE(3) #define MII_CLKRANGE_150_250M MII_CLKRANGE(4) #define MII_CLKRANGE_250_300M MII_CLKRANGE(5) #define MII_CLKRANGE_MASK GENMASK(4, 2) #define MII_REG_SHIFT 6 #define MII_REG_MASK GENMASK(10, 6) #define MII_ADDR_SHIFT 11 #define MII_ADDR_MASK GENMASK(15, 11) #define MII_DATA_REG_ADDR 0x14 #define MII_MDIO_DELAY_USEC (1000) #define MII_MDIO_RETRY_MSEC (10) struct ipq8064_mdio { struct regmap *base; /* NSS_GMAC0_BASE */ }; static int ipq8064_mdio_wait_busy(struct ipq8064_mdio *priv) { u32 busy; return regmap_read_poll_timeout(priv->base, MII_ADDR_REG_ADDR, busy, !(busy & MII_BUSY), MII_MDIO_DELAY_USEC, MII_MDIO_RETRY_MSEC * USEC_PER_MSEC); } static int ipq8064_mdio_read(struct mii_bus *bus, int phy_addr, int reg_offset) { u32 miiaddr = MII_BUSY | MII_CLKRANGE_250_300M; struct ipq8064_mdio *priv = bus->priv; u32 ret_val; int err; miiaddr |= ((phy_addr << MII_ADDR_SHIFT) & MII_ADDR_MASK) | ((reg_offset << MII_REG_SHIFT) & MII_REG_MASK); regmap_write(priv->base, MII_ADDR_REG_ADDR, miiaddr); usleep_range(10, 13); err = ipq8064_mdio_wait_busy(priv); if (err) return err; regmap_read(priv->base, MII_DATA_REG_ADDR, &ret_val); return (int)ret_val; } static int ipq8064_mdio_write(struct mii_bus *bus, int phy_addr, int reg_offset, u16 data) { u32 miiaddr = MII_WRITE | MII_BUSY | MII_CLKRANGE_250_300M; struct ipq8064_mdio *priv = bus->priv; regmap_write(priv->base, MII_DATA_REG_ADDR, data); miiaddr |= ((phy_addr << MII_ADDR_SHIFT) & MII_ADDR_MASK) | ((reg_offset << MII_REG_SHIFT) & MII_REG_MASK); regmap_write(priv->base, MII_ADDR_REG_ADDR, miiaddr); /* For the specific reg 31 extra time is needed or the next * read will produce garbage data. 
*/ if (reg_offset == 31) usleep_range(30, 43); else usleep_range(10, 13); return ipq8064_mdio_wait_busy(priv); } static const struct regmap_config ipq8064_mdio_regmap_config = { .reg_bits = 32, .reg_stride = 4, .val_bits = 32, .can_multi_write = false, /* the mdio lock is used by any user of this mdio driver */ .disable_locking = true, .cache_type = REGCACHE_NONE, }; static int ipq8064_mdio_probe(struct platform_device *pdev) { struct device_node *np = pdev->dev.of_node; struct ipq8064_mdio *priv; struct resource res; struct mii_bus *bus; void __iomem *base; int ret; if (of_address_to_resource(np, 0, &res)) return -ENOMEM; base = devm_ioremap(&pdev->dev, res.start, resource_size(&res)); if (!base) return -ENOMEM; bus = devm_mdiobus_alloc_size(&pdev->dev, sizeof(*priv)); if (!bus) return -ENOMEM; bus->name = "ipq8064_mdio_bus"; bus->read = ipq8064_mdio_read; bus->write = ipq8064_mdio_write; snprintf(bus->id, MII_BUS_ID_SIZE, "%s-mii", dev_name(&pdev->dev)); bus->parent = &pdev->dev; priv = bus->priv; priv->base = devm_regmap_init_mmio(&pdev->dev, base, &ipq8064_mdio_regmap_config); if (IS_ERR(priv->base)) return PTR_ERR(priv->base); ret = of_mdiobus_register(bus, np); if (ret) return ret; platform_set_drvdata(pdev, bus); return 0; } static int ipq8064_mdio_remove(struct platform_device *pdev) { struct mii_bus *bus = platform_get_drvdata(pdev); mdiobus_unregister(bus); return 0; } static const struct of_device_id ipq8064_mdio_dt_ids[] = { { .compatible = "qcom,ipq8064-mdio" }, { } }; MODULE_DEVICE_TABLE(of, ipq8064_mdio_dt_ids); static struct platform_driver ipq8064_mdio_driver = { .probe = ipq8064_mdio_probe, .remove = ipq8064_mdio_remove, .driver = { .name = "ipq8064-mdio", .of_match_table = ipq8064_mdio_dt_ids, }, }; module_platform_driver(ipq8064_mdio_driver); MODULE_DESCRIPTION("Qualcomm IPQ8064 MDIO interface driver"); MODULE_AUTHOR("Christian Lamparter <[email protected]>"); MODULE_AUTHOR("Ansuel Smith <[email protected]>"); MODULE_LICENSE("GPL");
linux-master
drivers/net/mdio/mdio-ipq8064.c
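The probe above maps its registers with plain devm_ioremap() on purpose, since the window overlaps the NSS GMAC block and must stay shareable, and then wraps the mapping in an MMIO regmap. For a device that owns its register window exclusively, the more common shape is the one sketched below; the demo_* names are invented, while the regmap_config fields match the ones used above.

#include <linux/err.h>
#include <linux/io.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>

static const struct regmap_config demo_mmio_regmap_config = {
	.reg_bits = 32,
	.reg_stride = 4,
	.val_bits = 32,
};

/* Exclusive-ownership variant: claim and map the resource in one step,
 * then expose it as an MMIO regmap.
 */
static struct regmap *demo_mmio_regmap(struct platform_device *pdev)
{
	void __iomem *base;

	base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(base))
		return ERR_CAST(base);

	return devm_regmap_init_mmio(&pdev->dev, base,
				     &demo_mmio_regmap_config);
}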
// SPDX-License-Identifier: GPL-2.0 /* * Allwinner EMAC MDIO interface driver * * Copyright 2012-2013 Stefan Roese <[email protected]> * Copyright 2013 Maxime Ripard <[email protected]> * * Based on the Linux driver provided by Allwinner: * Copyright (C) 1997 Sten Wang */ #include <linux/delay.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/mutex.h> #include <linux/of_address.h> #include <linux/of_mdio.h> #include <linux/phy.h> #include <linux/platform_device.h> #include <linux/regulator/consumer.h> #define EMAC_MAC_MCMD_REG (0x00) #define EMAC_MAC_MADR_REG (0x04) #define EMAC_MAC_MWTD_REG (0x08) #define EMAC_MAC_MRDD_REG (0x0c) #define EMAC_MAC_MIND_REG (0x10) #define EMAC_MAC_SSRR_REG (0x14) #define MDIO_TIMEOUT (msecs_to_jiffies(100)) struct sun4i_mdio_data { void __iomem *membase; struct regulator *regulator; }; static int sun4i_mdio_read(struct mii_bus *bus, int mii_id, int regnum) { struct sun4i_mdio_data *data = bus->priv; unsigned long timeout_jiffies; int value; /* issue the phy address and reg */ writel((mii_id << 8) | regnum, data->membase + EMAC_MAC_MADR_REG); /* pull up the phy io line */ writel(0x1, data->membase + EMAC_MAC_MCMD_REG); /* Wait read complete */ timeout_jiffies = jiffies + MDIO_TIMEOUT; while (readl(data->membase + EMAC_MAC_MIND_REG) & 0x1) { if (time_is_before_jiffies(timeout_jiffies)) return -ETIMEDOUT; msleep(1); } /* push down the phy io line */ writel(0x0, data->membase + EMAC_MAC_MCMD_REG); /* and read data */ value = readl(data->membase + EMAC_MAC_MRDD_REG); return value; } static int sun4i_mdio_write(struct mii_bus *bus, int mii_id, int regnum, u16 value) { struct sun4i_mdio_data *data = bus->priv; unsigned long timeout_jiffies; /* issue the phy address and reg */ writel((mii_id << 8) | regnum, data->membase + EMAC_MAC_MADR_REG); /* pull up the phy io line */ writel(0x1, data->membase + EMAC_MAC_MCMD_REG); /* Wait read complete */ timeout_jiffies = jiffies + MDIO_TIMEOUT; while (readl(data->membase + EMAC_MAC_MIND_REG) & 0x1) { if (time_is_before_jiffies(timeout_jiffies)) return -ETIMEDOUT; msleep(1); } /* push down the phy io line */ writel(0x0, data->membase + EMAC_MAC_MCMD_REG); /* and write data */ writel(value, data->membase + EMAC_MAC_MWTD_REG); return 0; } static int sun4i_mdio_probe(struct platform_device *pdev) { struct device_node *np = pdev->dev.of_node; struct mii_bus *bus; struct sun4i_mdio_data *data; int ret; bus = mdiobus_alloc_size(sizeof(*data)); if (!bus) return -ENOMEM; bus->name = "sun4i_mii_bus"; bus->read = &sun4i_mdio_read; bus->write = &sun4i_mdio_write; snprintf(bus->id, MII_BUS_ID_SIZE, "%s-mii", dev_name(&pdev->dev)); bus->parent = &pdev->dev; data = bus->priv; data->membase = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(data->membase)) { ret = PTR_ERR(data->membase); goto err_out_free_mdiobus; } data->regulator = devm_regulator_get(&pdev->dev, "phy"); if (IS_ERR(data->regulator)) { if (PTR_ERR(data->regulator) == -EPROBE_DEFER) { ret = -EPROBE_DEFER; goto err_out_free_mdiobus; } dev_info(&pdev->dev, "no regulator found\n"); data->regulator = NULL; } else { ret = regulator_enable(data->regulator); if (ret) goto err_out_free_mdiobus; } ret = of_mdiobus_register(bus, np); if (ret < 0) goto err_out_disable_regulator; platform_set_drvdata(pdev, bus); return 0; err_out_disable_regulator: if (data->regulator) regulator_disable(data->regulator); err_out_free_mdiobus: mdiobus_free(bus); return ret; } static int sun4i_mdio_remove(struct platform_device *pdev) { struct mii_bus *bus = 
		platform_get_drvdata(pdev);
	struct sun4i_mdio_data *data = bus->priv;

	mdiobus_unregister(bus);
	if (data->regulator)
		regulator_disable(data->regulator);
	mdiobus_free(bus);

	return 0;
}

static const struct of_device_id sun4i_mdio_dt_ids[] = {
	{ .compatible = "allwinner,sun4i-a10-mdio" },
	/* Deprecated */
	{ .compatible = "allwinner,sun4i-mdio" },
	{ }
};
MODULE_DEVICE_TABLE(of, sun4i_mdio_dt_ids);

static struct platform_driver sun4i_mdio_driver = {
	.probe = sun4i_mdio_probe,
	.remove = sun4i_mdio_remove,
	.driver = {
		.name = "sun4i-mdio",
		.of_match_table = sun4i_mdio_dt_ids,
	},
};
module_platform_driver(sun4i_mdio_driver);

MODULE_DESCRIPTION("Allwinner EMAC MDIO interface driver");
MODULE_AUTHOR("Maxime Ripard <[email protected]>");
MODULE_LICENSE("GPL v2");
linux-master
drivers/net/mdio/mdio-sun4i.c
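sun4i_mdio_read() and sun4i_mdio_write() duplicate the same jiffies-bounded wait on bit 0 of EMAC_MAC_MIND_REG. That loop could be factored into one helper; a sketch under the same register layout and timeout convention, assuming it lives next to the register macros defined in the file above.

#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/io.h>
#include <linux/jiffies.h>

/* Wait for the EMAC MII state machine to go idle (EMAC_MAC_MIND_REG
 * bit 0 clear), polling once per millisecond like the loops above.
 */
static int demo_sun4i_mdio_wait_idle(void __iomem *membase)
{
	unsigned long timeout_jiffies = jiffies + MDIO_TIMEOUT;

	while (readl(membase + EMAC_MAC_MIND_REG) & 0x1) {
		if (time_is_before_jiffies(timeout_jiffies))
			return -ETIMEDOUT;
		msleep(1);
	}

	return 0;
}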
// SPDX-License-Identifier: GPL-2.0+ /* Applied Micro X-Gene SoC MDIO Driver * * Copyright (c) 2016, Applied Micro Circuits Corporation * Author: Iyappan Subramanian <[email protected]> */ #include <linux/acpi.h> #include <linux/clk.h> #include <linux/device.h> #include <linux/efi.h> #include <linux/if_vlan.h> #include <linux/io.h> #include <linux/mdio/mdio-xgene.h> #include <linux/module.h> #include <linux/of_mdio.h> #include <linux/of_net.h> #include <linux/of_platform.h> #include <linux/phy.h> #include <linux/prefetch.h> #include <net/ip.h> u32 xgene_mdio_rd_mac(struct xgene_mdio_pdata *pdata, u32 rd_addr) { void __iomem *addr, *rd, *cmd, *cmd_done; u32 done, rd_data = BUSY_MASK; u8 wait = 10; addr = pdata->mac_csr_addr + MAC_ADDR_REG_OFFSET; rd = pdata->mac_csr_addr + MAC_READ_REG_OFFSET; cmd = pdata->mac_csr_addr + MAC_COMMAND_REG_OFFSET; cmd_done = pdata->mac_csr_addr + MAC_COMMAND_DONE_REG_OFFSET; spin_lock(&pdata->mac_lock); iowrite32(rd_addr, addr); iowrite32(XGENE_ENET_RD_CMD, cmd); while (!(done = ioread32(cmd_done)) && wait--) udelay(1); if (done) rd_data = ioread32(rd); iowrite32(0, cmd); spin_unlock(&pdata->mac_lock); return rd_data; } EXPORT_SYMBOL(xgene_mdio_rd_mac); void xgene_mdio_wr_mac(struct xgene_mdio_pdata *pdata, u32 wr_addr, u32 data) { void __iomem *addr, *wr, *cmd, *cmd_done; u8 wait = 10; u32 done; addr = pdata->mac_csr_addr + MAC_ADDR_REG_OFFSET; wr = pdata->mac_csr_addr + MAC_WRITE_REG_OFFSET; cmd = pdata->mac_csr_addr + MAC_COMMAND_REG_OFFSET; cmd_done = pdata->mac_csr_addr + MAC_COMMAND_DONE_REG_OFFSET; spin_lock(&pdata->mac_lock); iowrite32(wr_addr, addr); iowrite32(data, wr); iowrite32(XGENE_ENET_WR_CMD, cmd); while (!(done = ioread32(cmd_done)) && wait--) udelay(1); if (!done) pr_err("MCX mac write failed, addr: 0x%04x\n", wr_addr); iowrite32(0, cmd); spin_unlock(&pdata->mac_lock); } EXPORT_SYMBOL(xgene_mdio_wr_mac); int xgene_mdio_rgmii_read(struct mii_bus *bus, int phy_id, int reg) { struct xgene_mdio_pdata *pdata = bus->priv; u32 data, done; u8 wait = 10; data = SET_VAL(PHY_ADDR, phy_id) | SET_VAL(REG_ADDR, reg); xgene_mdio_wr_mac(pdata, MII_MGMT_ADDRESS_ADDR, data); xgene_mdio_wr_mac(pdata, MII_MGMT_COMMAND_ADDR, READ_CYCLE_MASK); do { usleep_range(5, 10); done = xgene_mdio_rd_mac(pdata, MII_MGMT_INDICATORS_ADDR); } while ((done & BUSY_MASK) && wait--); if (done & BUSY_MASK) { dev_err(&bus->dev, "MII_MGMT read failed\n"); return -EBUSY; } data = xgene_mdio_rd_mac(pdata, MII_MGMT_STATUS_ADDR); xgene_mdio_wr_mac(pdata, MII_MGMT_COMMAND_ADDR, 0); return data; } EXPORT_SYMBOL(xgene_mdio_rgmii_read); int xgene_mdio_rgmii_write(struct mii_bus *bus, int phy_id, int reg, u16 data) { struct xgene_mdio_pdata *pdata = bus->priv; u32 val, done; u8 wait = 10; val = SET_VAL(PHY_ADDR, phy_id) | SET_VAL(REG_ADDR, reg); xgene_mdio_wr_mac(pdata, MII_MGMT_ADDRESS_ADDR, val); xgene_mdio_wr_mac(pdata, MII_MGMT_CONTROL_ADDR, data); do { usleep_range(5, 10); done = xgene_mdio_rd_mac(pdata, MII_MGMT_INDICATORS_ADDR); } while ((done & BUSY_MASK) && wait--); if (done & BUSY_MASK) { dev_err(&bus->dev, "MII_MGMT write failed\n"); return -EBUSY; } return 0; } EXPORT_SYMBOL(xgene_mdio_rgmii_write); static u32 xgene_menet_rd_diag_csr(struct xgene_mdio_pdata *pdata, u32 offset) { return ioread32(pdata->diag_csr_addr + offset); } static void xgene_menet_wr_diag_csr(struct xgene_mdio_pdata *pdata, u32 offset, u32 val) { iowrite32(val, pdata->diag_csr_addr + offset); } static int xgene_enet_ecc_init(struct xgene_mdio_pdata *pdata) { u32 data; u8 wait = 10; 
xgene_menet_wr_diag_csr(pdata, MENET_CFG_MEM_RAM_SHUTDOWN_ADDR, 0x0); do { usleep_range(100, 110); data = xgene_menet_rd_diag_csr(pdata, MENET_BLOCK_MEM_RDY_ADDR); } while ((data != 0xffffffff) && wait--); if (data != 0xffffffff) { dev_err(pdata->dev, "Failed to release memory from shutdown\n"); return -ENODEV; } return 0; } static void xgene_gmac_reset(struct xgene_mdio_pdata *pdata) { xgene_mdio_wr_mac(pdata, MAC_CONFIG_1_ADDR, SOFT_RESET); xgene_mdio_wr_mac(pdata, MAC_CONFIG_1_ADDR, 0); } static int xgene_mdio_reset(struct xgene_mdio_pdata *pdata) { int ret; if (pdata->dev->of_node) { clk_prepare_enable(pdata->clk); udelay(5); clk_disable_unprepare(pdata->clk); udelay(5); clk_prepare_enable(pdata->clk); udelay(5); } else { #ifdef CONFIG_ACPI acpi_evaluate_object(ACPI_HANDLE(pdata->dev), "_RST", NULL, NULL); #endif } ret = xgene_enet_ecc_init(pdata); if (ret) { if (pdata->dev->of_node) clk_disable_unprepare(pdata->clk); return ret; } xgene_gmac_reset(pdata); return 0; } static void xgene_enet_rd_mdio_csr(void __iomem *base_addr, u32 offset, u32 *val) { void __iomem *addr = base_addr + offset; *val = ioread32(addr); } static void xgene_enet_wr_mdio_csr(void __iomem *base_addr, u32 offset, u32 val) { void __iomem *addr = base_addr + offset; iowrite32(val, addr); } static int xgene_xfi_mdio_write(struct mii_bus *bus, int phy_id, int reg, u16 data) { void __iomem *addr = (void __iomem *)bus->priv; int timeout = 100; u32 status, val; val = SET_VAL(HSTPHYADX, phy_id) | SET_VAL(HSTREGADX, reg) | SET_VAL(HSTMIIMWRDAT, data); xgene_enet_wr_mdio_csr(addr, MIIM_FIELD_ADDR, val); val = HSTLDCMD | SET_VAL(HSTMIIMCMD, MIIM_CMD_LEGACY_WRITE); xgene_enet_wr_mdio_csr(addr, MIIM_COMMAND_ADDR, val); do { usleep_range(5, 10); xgene_enet_rd_mdio_csr(addr, MIIM_INDICATOR_ADDR, &status); } while ((status & BUSY_MASK) && timeout--); xgene_enet_wr_mdio_csr(addr, MIIM_COMMAND_ADDR, 0); return 0; } static int xgene_xfi_mdio_read(struct mii_bus *bus, int phy_id, int reg) { void __iomem *addr = (void __iomem *)bus->priv; u32 data, status, val; int timeout = 100; val = SET_VAL(HSTPHYADX, phy_id) | SET_VAL(HSTREGADX, reg); xgene_enet_wr_mdio_csr(addr, MIIM_FIELD_ADDR, val); val = HSTLDCMD | SET_VAL(HSTMIIMCMD, MIIM_CMD_LEGACY_READ); xgene_enet_wr_mdio_csr(addr, MIIM_COMMAND_ADDR, val); do { usleep_range(5, 10); xgene_enet_rd_mdio_csr(addr, MIIM_INDICATOR_ADDR, &status); } while ((status & BUSY_MASK) && timeout--); if (status & BUSY_MASK) { pr_err("XGENET_MII_MGMT write failed\n"); return -EBUSY; } xgene_enet_rd_mdio_csr(addr, MIIMRD_FIELD_ADDR, &data); xgene_enet_wr_mdio_csr(addr, MIIM_COMMAND_ADDR, 0); return data; } struct phy_device *xgene_enet_phy_register(struct mii_bus *bus, int phy_addr) { struct phy_device *phy_dev; phy_dev = get_phy_device(bus, phy_addr, false); if (!phy_dev || IS_ERR(phy_dev)) return NULL; if (phy_device_register(phy_dev)) phy_device_free(phy_dev); return phy_dev; } EXPORT_SYMBOL(xgene_enet_phy_register); #ifdef CONFIG_ACPI static acpi_status acpi_register_phy(acpi_handle handle, u32 lvl, void *context, void **ret) { struct mii_bus *mdio = context; struct acpi_device *adev; struct phy_device *phy_dev; const union acpi_object *obj; u32 phy_addr; adev = acpi_fetch_acpi_dev(handle); if (!adev) return AE_OK; if (acpi_dev_get_property(adev, "phy-channel", ACPI_TYPE_INTEGER, &obj)) return AE_OK; phy_addr = obj->integer.value; phy_dev = xgene_enet_phy_register(mdio, phy_addr); adev->driver_data = phy_dev; return AE_OK; } #endif static const struct of_device_id xgene_mdio_of_match[] = { { 
.compatible = "apm,xgene-mdio-rgmii", .data = (void *)XGENE_MDIO_RGMII }, { .compatible = "apm,xgene-mdio-xfi", .data = (void *)XGENE_MDIO_XFI }, {}, }; MODULE_DEVICE_TABLE(of, xgene_mdio_of_match); #ifdef CONFIG_ACPI static const struct acpi_device_id xgene_mdio_acpi_match[] = { { "APMC0D65", XGENE_MDIO_RGMII }, { "APMC0D66", XGENE_MDIO_XFI }, { } }; MODULE_DEVICE_TABLE(acpi, xgene_mdio_acpi_match); #endif static int xgene_mdio_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; struct mii_bus *mdio_bus; const struct of_device_id *of_id; struct xgene_mdio_pdata *pdata; void __iomem *csr_base; int mdio_id = 0, ret = 0; of_id = of_match_device(xgene_mdio_of_match, &pdev->dev); if (of_id) { mdio_id = (uintptr_t)of_id->data; } else { #ifdef CONFIG_ACPI const struct acpi_device_id *acpi_id; acpi_id = acpi_match_device(xgene_mdio_acpi_match, &pdev->dev); if (acpi_id) mdio_id = (enum xgene_mdio_id)acpi_id->driver_data; #endif } if (!mdio_id) return -ENODEV; pdata = devm_kzalloc(dev, sizeof(struct xgene_mdio_pdata), GFP_KERNEL); if (!pdata) return -ENOMEM; pdata->mdio_id = mdio_id; pdata->dev = dev; csr_base = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(csr_base)) return PTR_ERR(csr_base); pdata->mac_csr_addr = csr_base; pdata->mdio_csr_addr = csr_base + BLOCK_XG_MDIO_CSR_OFFSET; pdata->diag_csr_addr = csr_base + BLOCK_DIAG_CSR_OFFSET; if (mdio_id == XGENE_MDIO_RGMII) spin_lock_init(&pdata->mac_lock); if (dev->of_node) { pdata->clk = devm_clk_get(dev, NULL); if (IS_ERR(pdata->clk)) { dev_err(dev, "Unable to retrieve clk\n"); return PTR_ERR(pdata->clk); } } ret = xgene_mdio_reset(pdata); if (ret) return ret; mdio_bus = mdiobus_alloc(); if (!mdio_bus) { ret = -ENOMEM; goto out_clk; } mdio_bus->name = "APM X-Gene MDIO bus"; if (mdio_id == XGENE_MDIO_RGMII) { mdio_bus->read = xgene_mdio_rgmii_read; mdio_bus->write = xgene_mdio_rgmii_write; mdio_bus->priv = (void __force *)pdata; snprintf(mdio_bus->id, MII_BUS_ID_SIZE, "%s", "xgene-mii-rgmii"); } else { mdio_bus->read = xgene_xfi_mdio_read; mdio_bus->write = xgene_xfi_mdio_write; mdio_bus->priv = (void __force *)pdata->mdio_csr_addr; snprintf(mdio_bus->id, MII_BUS_ID_SIZE, "%s", "xgene-mii-xfi"); } mdio_bus->parent = dev; platform_set_drvdata(pdev, pdata); if (dev->of_node) { ret = of_mdiobus_register(mdio_bus, dev->of_node); } else { #ifdef CONFIG_ACPI /* Mask out all PHYs from auto probing. */ mdio_bus->phy_mask = ~0; ret = mdiobus_register(mdio_bus); if (ret) goto out_mdiobus; acpi_walk_namespace(ACPI_TYPE_DEVICE, ACPI_HANDLE(dev), 1, acpi_register_phy, NULL, mdio_bus, NULL); #endif } if (ret) goto out_mdiobus; pdata->mdio_bus = mdio_bus; return 0; out_mdiobus: mdiobus_free(mdio_bus); out_clk: if (dev->of_node) clk_disable_unprepare(pdata->clk); return ret; } static int xgene_mdio_remove(struct platform_device *pdev) { struct xgene_mdio_pdata *pdata = platform_get_drvdata(pdev); struct mii_bus *mdio_bus = pdata->mdio_bus; struct device *dev = &pdev->dev; mdiobus_unregister(mdio_bus); mdiobus_free(mdio_bus); if (dev->of_node) clk_disable_unprepare(pdata->clk); return 0; } static struct platform_driver xgene_mdio_driver = { .driver = { .name = "xgene-mdio", .of_match_table = of_match_ptr(xgene_mdio_of_match), .acpi_match_table = ACPI_PTR(xgene_mdio_acpi_match), }, .probe = xgene_mdio_probe, .remove = xgene_mdio_remove, }; module_platform_driver(xgene_mdio_driver); MODULE_DESCRIPTION("APM X-Gene SoC MDIO driver"); MODULE_AUTHOR("Iyappan Subramanian <[email protected]>"); MODULE_LICENSE("GPL");
linux-master
drivers/net/mdio/mdio-xgene.c